Posted to commits@carbondata.apache.org by ch...@apache.org on 2016/08/15 07:08:46 UTC

[01/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Repository: incubator-carbondata
Updated Branches:
  refs/heads/master d545910c0 -> 6a2c504fd


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/result/iterator/AbstractDetailQueryResultIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/result/iterator/AbstractDetailQueryResultIterator.java b/core/src/main/java/org/carbondata/scan/result/iterator/AbstractDetailQueryResultIterator.java
deleted file mode 100644
index 7ab350a..0000000
--- a/core/src/main/java/org/carbondata/scan/result/iterator/AbstractDetailQueryResultIterator.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.result.iterator;
-
-import java.util.List;
-
-import org.carbondata.common.CarbonIterator;
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.datastore.DataRefNode;
-import org.carbondata.core.carbon.datastore.DataRefNodeFinder;
-import org.carbondata.core.carbon.datastore.impl.btree.BTreeDataRefNodeFinder;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-import org.carbondata.core.util.CarbonProperties;
-import org.carbondata.scan.executor.infos.BlockExecutionInfo;
-import org.carbondata.scan.model.QueryModel;
-import org.carbondata.scan.processor.AbstractDataBlockIterator;
-import org.carbondata.scan.processor.impl.DataBlockIteratorImpl;
-
-/**
- * For a detail query we cannot keep all the records in memory, so query
- * execution returns an iterator over the blocks; each call to next() executes
- * the next block and returns its result.
- */
-public abstract class AbstractDetailQueryResultIterator extends CarbonIterator {
-
-  /**
-   * LOGGER.
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(AbstractDetailQueryResultIterator.class.getName());
-
-  /**
-   * execution info of the block
-   */
-  protected List<BlockExecutionInfo> blockExecutionInfos;
-
-  /**
-   * number of records returned per next() call (batch size)
-   */
-  private int batchSize;
-
-  /**
-   * file reader which will be used to execute the query
-   */
-  protected FileHolder fileReader;
-
-  protected AbstractDataBlockIterator dataBlockIterator;
-
-  protected boolean nextBatch = false;
-
-  public AbstractDetailQueryResultIterator(List<BlockExecutionInfo> infos, QueryModel queryModel) {
-    String batchSizeString =
-        CarbonProperties.getInstance().getProperty(CarbonCommonConstants.DETAIL_QUERY_BATCH_SIZE);
-    if (null != batchSizeString) {
-      try {
-        batchSize = Integer.parseInt(batchSizeString);
-      } catch (NumberFormatException ne) {
-        LOGGER.error("Invalid inmemory records size. Using default value");
-        batchSize = CarbonCommonConstants.DETAIL_QUERY_BATCH_SIZE_DEFAULT;
-      }
-    } else {
-      batchSize = CarbonCommonConstants.DETAIL_QUERY_BATCH_SIZE_DEFAULT;
-    }
-
-    this.blockExecutionInfos = infos;
-    this.fileReader = FileFactory.getFileHolder(
-        FileFactory.getFileType(queryModel.getAbsoluteTableIdentifier().getStorePath()));
-    initialiseInfos();
-  }
-
-  private void initialiseInfos() {
-    for (BlockExecutionInfo blockInfo : blockExecutionInfos) {
-      DataRefNodeFinder finder = new BTreeDataRefNodeFinder(blockInfo.getEachColumnValueSize());
-      DataRefNode startDataBlock = finder
-          .findFirstDataBlock(blockInfo.getDataBlock().getDataRefNode(), blockInfo.getStartKey());
-      DataRefNode endDataBlock = finder
-          .findLastDataBlock(blockInfo.getDataBlock().getDataRefNode(), blockInfo.getEndKey());
-      long numberOfBlockToScan = endDataBlock.nodeNumber() - startDataBlock.nodeNumber() + 1;
-      blockInfo.setFirstDataBlock(startDataBlock);
-      blockInfo.setNumberOfBlockToScan(numberOfBlockToScan);
-    }
-  }
-
-  @Override public boolean hasNext() {
-    if ((dataBlockIterator != null && dataBlockIterator.hasNext()) || nextBatch) {
-      return true;
-    } else {
-      return blockExecutionInfos.size() > 0;
-    }
-  }
-
-  protected void updateDataBlockIterator() {
-    if (dataBlockIterator == null || !dataBlockIterator.hasNext()) {
-      dataBlockIterator = getDataBlockIterator();
-      while (dataBlockIterator != null && !dataBlockIterator.hasNext()) {
-        dataBlockIterator = getDataBlockIterator();
-      }
-    }
-  }
-
-  private DataBlockIteratorImpl getDataBlockIterator() {
-    if (blockExecutionInfos.size() > 0) {
-      BlockExecutionInfo executionInfo = blockExecutionInfos.remove(0);
-      return new DataBlockIteratorImpl(executionInfo, fileReader, batchSize);
-    }
-    return null;
-  }
-
-
-}
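
The constructor above shows a parse-with-fallback idiom for the batch-size property. Below is a minimal standalone sketch of the same idiom; the property key and default value are illustrative placeholders, not CarbonData's actual constants:

import java.util.Properties;

public final class BatchSizeConfig {

  // Placeholder key/default; the iterator reads these from CarbonCommonConstants.
  private static final String BATCH_SIZE_KEY = "detail.query.batch.size";
  private static final int BATCH_SIZE_DEFAULT = 100;

  /**
   * Resolves the configured batch size, falling back to the default when the
   * property is absent or not a valid integer, mirroring the constructor above.
   */
  static int resolveBatchSize(Properties props) {
    String raw = props.getProperty(BATCH_SIZE_KEY);
    if (raw == null) {
      return BATCH_SIZE_DEFAULT;
    }
    try {
      return Integer.parseInt(raw.trim());
    } catch (NumberFormatException e) {
      return BATCH_SIZE_DEFAULT;
    }
  }
}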


[12/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/CarbonLoadStatisticsImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/CarbonLoadStatisticsImpl.java b/core/src/main/java/org/carbondata/core/util/CarbonLoadStatisticsImpl.java
deleted file mode 100644
index 3a56db2..0000000
--- a/core/src/main/java/org/carbondata/core/util/CarbonLoadStatisticsImpl.java
+++ /dev/null
@@ -1,413 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.carbondata.core.util;
-
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-
-/**
- * A utility which provides methods to record timing information during data loading.
- */
-public class CarbonLoadStatisticsImpl implements LoadStatistics {
-  private CarbonLoadStatisticsImpl() {
-
-  }
-
-  private static CarbonLoadStatisticsImpl carbonLoadStatisticsImplInstance =
-          new CarbonLoadStatisticsImpl();
-
-  public static CarbonLoadStatisticsImpl getInstance() {
-    return carbonLoadStatisticsImplInstance;
-  }
-
-  private final LogService LOGGER =
-          LogServiceFactory.getLogService(CarbonLoadStatisticsImpl.class.getName());
-
-  /*
-   * We only care about the earliest start time (EST) and the latest end time (LET)
-   * across the threads doing the same work; LET - EST is the elapsed time of that
-   * work when performed by multiple threads.
-   */
-  private long loadCsvfilesToDfStartTime = 0;
-  private long loadCsvfilesToDfCostTime = 0;
-  private long dicShuffleAndWriteFileTotalStartTime = 0;
-  private long dicShuffleAndWriteFileTotalCostTime = 0;
-
-  //LRU cache load one time
-  private double lruCacheLoadTime = 0;
-
-  //Generate surrogate keys total time for each partition:
-  private ConcurrentHashMap<String, Long[]> parDictionaryValuesTotalTimeMap =
-          new ConcurrentHashMap<String, Long[]>();
-  private ConcurrentHashMap<String, Long[]> parCsvInputStepTimeMap =
-          new ConcurrentHashMap<String, Long[]>();
-  private ConcurrentHashMap<String, Long[]> parGeneratingDictionaryValuesTimeMap =
-          new ConcurrentHashMap<String, Long[]>();
-
-  //Sort rows step total time for each partition:
-  private ConcurrentHashMap<String, Long[]> parSortRowsStepTotalTimeMap =
-          new ConcurrentHashMap<String, Long[]>();
-
-  //MDK generate total time for each partition:
-  private ConcurrentHashMap<String, Long[]> parMdkGenerateTotalTimeMap =
-          new ConcurrentHashMap<String, Long[]>();
-  private ConcurrentHashMap<String, Long[]> parDictionaryValue2MdkAdd2FileTime =
-          new ConcurrentHashMap<String, Long[]>();
-
-  //Node block process information
-  private ConcurrentHashMap<String, Integer> hostBlockMap =
-          new ConcurrentHashMap<String, Integer>();
-
-  //Partition block process information
-  private ConcurrentHashMap<String, Integer> partitionBlockMap =
-          new ConcurrentHashMap<String, Integer>();
-
-  private long totalRecords = 0;
-  private double totalTime = 0;
-
-  @Override
-  public void initPartitonInfo(String partitionId) {
-    parDictionaryValuesTotalTimeMap.put(partitionId, new Long[2]);
-    parCsvInputStepTimeMap.put(partitionId, new Long[2]);
-    parSortRowsStepTotalTimeMap.put(partitionId, new Long[2]);
-    parGeneratingDictionaryValuesTimeMap.put(partitionId, new Long[2]);
-    parMdkGenerateTotalTimeMap.put(partitionId, new Long[2]);
-    parDictionaryValue2MdkAdd2FileTime.put(partitionId, new Long[2]);
-  }
-
-  //Record the time
-  public void recordDicShuffleAndWriteTime() {
-    Long dicShuffleAndWriteTimePoint = System.currentTimeMillis();
-    if (0 == dicShuffleAndWriteFileTotalStartTime) {
-      dicShuffleAndWriteFileTotalStartTime = dicShuffleAndWriteTimePoint;
-    }
-    if (dicShuffleAndWriteTimePoint - dicShuffleAndWriteFileTotalStartTime >
-            dicShuffleAndWriteFileTotalCostTime) {
-      dicShuffleAndWriteFileTotalCostTime =
-          dicShuffleAndWriteTimePoint - dicShuffleAndWriteFileTotalStartTime;
-    }
-  }
-
-  public void recordLoadCsvfilesToDfTime() {
-    Long loadCsvfilesToDfTimePoint = System.currentTimeMillis();
-    if (0 == loadCsvfilesToDfStartTime) {
-      loadCsvfilesToDfStartTime = loadCsvfilesToDfTimePoint;
-    }
-    if (loadCsvfilesToDfTimePoint - loadCsvfilesToDfStartTime > loadCsvfilesToDfCostTime) {
-      loadCsvfilesToDfCostTime = loadCsvfilesToDfTimePoint - loadCsvfilesToDfStartTime;
-    }
-  }
-
-  public double getLruCacheLoadTime() {
-    return lruCacheLoadTime;
-  }
-
-  public void recordDictionaryValuesTotalTime(String partitionID,
-      Long dictionaryValuesTotalTimeTimePoint) {
-    if (null != parDictionaryValuesTotalTimeMap.get(partitionID)) {
-      if (null == parDictionaryValuesTotalTimeMap.get(partitionID)[0]) {
-        parDictionaryValuesTotalTimeMap.get(partitionID)[0] = dictionaryValuesTotalTimeTimePoint;
-      }
-      if (null == parDictionaryValuesTotalTimeMap.get(partitionID)[1] ||
-          dictionaryValuesTotalTimeTimePoint - parDictionaryValuesTotalTimeMap.get(partitionID)[0] >
-              parDictionaryValuesTotalTimeMap.get(partitionID)[1]) {
-        parDictionaryValuesTotalTimeMap.get(partitionID)[1] = dictionaryValuesTotalTimeTimePoint -
-            parDictionaryValuesTotalTimeMap.get(partitionID)[0];
-      }
-    }
-  }
-
-  public void recordCsvInputStepTime(String partitionID,
-      Long csvInputStepTimePoint) {
-    if (null != parCsvInputStepTimeMap.get(partitionID)) {
-      if (null == parCsvInputStepTimeMap.get(partitionID)[0]) {
-        parCsvInputStepTimeMap.get(partitionID)[0] = csvInputStepTimePoint;
-      }
-      if (null == parCsvInputStepTimeMap.get(partitionID)[1] ||
-              csvInputStepTimePoint - parCsvInputStepTimeMap.get(partitionID)[0] >
-                      parCsvInputStepTimeMap.get(partitionID)[1]) {
-        parCsvInputStepTimeMap.get(partitionID)[1] = csvInputStepTimePoint -
-                parCsvInputStepTimeMap.get(partitionID)[0];
-      }
-    }
-  }
-
-  public void recordLruCacheLoadTime(double lruCacheLoadTime) {
-    this.lruCacheLoadTime = lruCacheLoadTime;
-  }
-
-  public void recordGeneratingDictionaryValuesTime(String partitionID,
-      Long generatingDictionaryValuesTimePoint) {
-    if (null != parGeneratingDictionaryValuesTimeMap.get(partitionID)) {
-      if (null == parGeneratingDictionaryValuesTimeMap.get(partitionID)[0]) {
-        parGeneratingDictionaryValuesTimeMap.get(partitionID)[0] =
-                generatingDictionaryValuesTimePoint;
-      }
-      if (null == parGeneratingDictionaryValuesTimeMap.get(partitionID)[1] ||
-              generatingDictionaryValuesTimePoint - parGeneratingDictionaryValuesTimeMap
-                      .get(partitionID)[0] > parGeneratingDictionaryValuesTimeMap
-                      .get(partitionID)[1]) {
-        parGeneratingDictionaryValuesTimeMap.get(partitionID)[1] =
-                generatingDictionaryValuesTimePoint - parGeneratingDictionaryValuesTimeMap
-                        .get(partitionID)[0];
-      }
-    }
-  }
-
-  public void recordSortRowsStepTotalTime(String partitionID,
-                                          Long sortRowsStepTotalTimePoint) {
-    if (null != parSortRowsStepTotalTimeMap.get(partitionID)) {
-      if (null == parSortRowsStepTotalTimeMap.get(partitionID)[0]) {
-        parSortRowsStepTotalTimeMap.get(partitionID)[0] = sortRowsStepTotalTimePoint;
-      }
-      if (null == parSortRowsStepTotalTimeMap.get(partitionID)[1] ||
-              sortRowsStepTotalTimePoint - parSortRowsStepTotalTimeMap.get(partitionID)[0] >
-                      parSortRowsStepTotalTimeMap.get(partitionID)[1]) {
-        parSortRowsStepTotalTimeMap.get(partitionID)[1] = sortRowsStepTotalTimePoint -
-                parSortRowsStepTotalTimeMap.get(partitionID)[0];
-      }
-    }
-  }
-
-  public void recordMdkGenerateTotalTime(String partitionID,
-                                         Long mdkGenerateTotalTimePoint) {
-    if (null != parMdkGenerateTotalTimeMap.get(partitionID)) {
-      if (null == parMdkGenerateTotalTimeMap.get(partitionID)[0]) {
-        parMdkGenerateTotalTimeMap.get(partitionID)[0] = mdkGenerateTotalTimePoint;
-      }
-      if (null == parMdkGenerateTotalTimeMap.get(partitionID)[1] ||
-              mdkGenerateTotalTimePoint - parMdkGenerateTotalTimeMap.get(partitionID)[0] >
-                      parMdkGenerateTotalTimeMap.get(partitionID)[1]) {
-        parMdkGenerateTotalTimeMap.get(partitionID)[1] = mdkGenerateTotalTimePoint -
-                parMdkGenerateTotalTimeMap.get(partitionID)[0];
-      }
-    }
-  }
-
-  public void recordDictionaryValue2MdkAdd2FileTime(String partitionID,
-      Long dictionaryValue2MdkAdd2FileTimePoint) {
-    if (null != parDictionaryValue2MdkAdd2FileTime.get(partitionID)) {
-      if (null == parDictionaryValue2MdkAdd2FileTime.get(partitionID)[0]) {
-        parDictionaryValue2MdkAdd2FileTime.get(partitionID)[0] =
-                dictionaryValue2MdkAdd2FileTimePoint;
-      }
-      if (null == parDictionaryValue2MdkAdd2FileTime.get(partitionID)[1] ||
-              dictionaryValue2MdkAdd2FileTimePoint - parDictionaryValue2MdkAdd2FileTime
-                      .get(partitionID)[0] > parDictionaryValue2MdkAdd2FileTime
-                      .get(partitionID)[1]) {
-        parDictionaryValue2MdkAdd2FileTime.get(partitionID)[1] =
-                dictionaryValue2MdkAdd2FileTimePoint - parDictionaryValue2MdkAdd2FileTime
-                        .get(partitionID)[0];
-      }
-    }
-  }
-
-  //Record the node blocks information map
-  public void recordHostBlockMap(String host, Integer numBlocks) {
-    hostBlockMap.put(host, numBlocks);
-  }
-
-  //Record the partition blocks information map
-  public void recordPartitionBlockMap(String partitionID, Integer numBlocks) {
-    partitionBlockMap.put(partitionID, numBlocks);
-  }
-
-  public void recordTotalRecords(long totalRecords) {
-    this.totalRecords = totalRecords;
-  }
-
-  //Get the time
-  private double getDicShuffleAndWriteFileTotalTime() {
-    return dicShuffleAndWriteFileTotalCostTime / 1000.0;
-  }
-
-  private double getLoadCsvfilesToDfTime() {
-    return loadCsvfilesToDfCostTime / 1000.0;
-  }
-
-  private double getDictionaryValuesTotalTime(String partitionID) {
-    return parDictionaryValuesTotalTimeMap.get(partitionID)[1] / 1000.0;
-  }
-
-  private double getCsvInputStepTime(String partitionID) {
-    return parCsvInputStepTimeMap.get(partitionID)[1] / 1000.0;
-  }
-
-  private double getGeneratingDictionaryValuesTime(String partitionID) {
-    return parGeneratingDictionaryValuesTimeMap.get(partitionID)[1] / 1000.0;
-  }
-
-  private double getSortRowsStepTotalTime(String partitionID) {
-    return parSortRowsStepTotalTimeMap.get(partitionID)[1] / 1000.0;
-  }
-
-  private double getDictionaryValue2MdkAdd2FileTime(String partitionID) {
-    return parDictionaryValue2MdkAdd2FileTime.get(partitionID)[1] / 1000.0;
-  }
-
-  //Get the hostBlockMap
-  private ConcurrentHashMap<String, Integer> getHostBlockMap() {
-    return hostBlockMap;
-  }
-
-  //Get the partitionBlockMap
-  private ConcurrentHashMap<String, Integer> getPartitionBlockMap() {
-    return partitionBlockMap;
-  }
-
-  //Speed calculate
-  private long getTotalRecords() {
-    return this.totalRecords;
-  }
-
-  private int getLoadSpeed() {
-    return (int)(totalRecords / totalTime);
-  }
-
-  private int getGenDicSpeed() {
-    return (int) (totalRecords
-        / (getLoadCsvfilesToDfTime() + getDicShuffleAndWriteFileTotalTime()));
-  }
-
-  private int getReadCSVSpeed(String partitionID) {
-    return (int)(totalRecords / getCsvInputStepTime(partitionID));
-  }
-
-  private int getGenSurKeySpeed(String partitionID) {
-    return (int)(totalRecords / getGeneratingDictionaryValuesTime(partitionID));
-  }
-
-  private int getSortKeySpeed(String partitionID) {
-    return (int)(totalRecords / getSortRowsStepTotalTime(partitionID));
-  }
-
-  private int getMDKSpeed(String partitionID) {
-    return (int)(totalRecords / getDictionaryValue2MdkAdd2FileTime(partitionID));
-  }
-
-  private double getTotalTime(String partitionID) {
-    this.totalTime = getLoadCsvfilesToDfTime() + getDicShuffleAndWriteFileTotalTime() +
-        getLruCacheLoadTime() + getDictionaryValuesTotalTime(partitionID) +
-        getDictionaryValue2MdkAdd2FileTime(partitionID);
-    return totalTime;
-  }
-
-  //Print the statistics information
-  private void printDicGenStatisticsInfo() {
-    double loadCsvfilesToDfTime = getLoadCsvfilesToDfTime();
-    LOGGER.audit("STAGE 1 ->Load csv to DataFrame and generate" +
-            " block distinct values: " + loadCsvfilesToDfTime + "(s)");
-    double dicShuffleAndWriteFileTotalTime = getDicShuffleAndWriteFileTotalTime();
-    LOGGER.audit("STAGE 2 ->Global dict shuffle and write dict file: " +
-            + dicShuffleAndWriteFileTotalTime + "(s)");
-  }
-
-  private void printLruCacheLoadTimeInfo() {
-    LOGGER.audit("STAGE 3 ->LRU cache load: " + getLruCacheLoadTime() + "(s)");
-  }
-
-  private void printDictionaryValuesGenStatisticsInfo(String partitionID) {
-    double dictionaryValuesTotalTime = getDictionaryValuesTotalTime(partitionID);
-    LOGGER.audit("STAGE 4 ->Total cost of gen dictionary values, sort and write to temp files: "
-            + dictionaryValuesTotalTime + "(s)");
-    double csvInputStepTime = getCsvInputStepTime(partitionID);
-    double generatingDictionaryValuesTime = getGeneratingDictionaryValuesTime(partitionID);
-    LOGGER.audit("STAGE 4.1 ->  |_read csv file: " + csvInputStepTime + "(s)");
-    LOGGER.audit("STAGE 4.2 ->  |_transform to surrogate key: "
-            + generatingDictionaryValuesTime + "(s)");
-  }
-
-  private void printSortRowsStepStatisticsInfo(String partitionID) {
-    double sortRowsStepTotalTime = getSortRowsStepTotalTime(partitionID);
-    LOGGER.audit("STAGE 4.3 ->  |_sort rows and write to temp file: "
-            + sortRowsStepTotalTime + "(s)");
-  }
-
-  private void printGenMdkStatisticsInfo(String partitionID) {
-    double dictionaryValue2MdkAdd2FileTime = getDictionaryValue2MdkAdd2FileTime(partitionID);
-    LOGGER.audit("STAGE 5 ->Transform to MDK, compress and write fact files: "
-            + dictionaryValue2MdkAdd2FileTime + "(s)");
-  }
-
-  //Print the node blocks information
-  private void printHostBlockMapInfo() {
-    LOGGER.audit("========== BLOCK_INFO ==========");
-    if (getHostBlockMap().size() > 0) {
-      for (String host: getHostBlockMap().keySet()) {
-        LOGGER.audit("BLOCK_INFO ->Node host: " + host);
-        LOGGER.audit("BLOCK_INFO ->The block count in this node: " + getHostBlockMap().get(host));
-      }
-    } else if (getPartitionBlockMap().size() > 0) {
-      for (String parID: getPartitionBlockMap().keySet()) {
-        LOGGER.audit("BLOCK_INFO ->Partition ID: " + parID);
-        LOGGER.audit("BLOCK_INFO ->The block count in this partition: " +
-                getPartitionBlockMap().get(parID));
-      }
-    }
-  }
-
-  //Print the speed information
-  private void printLoadSpeedInfo(String partitionID) {
-    LOGGER.audit("===============Load_Speed_Info===============");
-    LOGGER.audit("Total Num of Records Processed: " + getTotalRecords());
-    LOGGER.audit("Total Time Cost: " + getTotalTime(partitionID) + "(s)");
-    LOGGER.audit("Total Load Speed: " + getLoadSpeed() + "records/s");
-    LOGGER.audit("Generate Dictionaries Speed: " + getGenDicSpeed() + "records/s");
-    LOGGER.audit("Read CSV Speed: " + getReadCSVSpeed(partitionID) + " records/s");
-    LOGGER.audit("Generate Surrogate Key Speed: " + getGenSurKeySpeed(partitionID) + " records/s");
-    LOGGER.audit("Sort Key/Write Temp Files Speed: " + getSortKeySpeed(partitionID) + " records/s");
-    LOGGER.audit("MDK Step Speed: " + getMDKSpeed(partitionID) + " records/s");
-    LOGGER.audit("=============================================");
-  }
-
-  public void printStatisticsInfo(String partitionID) {
-    try {
-      LOGGER.audit("========== TIME_STATISTICS PartitionID: " + partitionID + "==========");
-      printDicGenStatisticsInfo();
-      printLruCacheLoadTimeInfo();
-      printDictionaryValuesGenStatisticsInfo(partitionID);
-      printSortRowsStepStatisticsInfo(partitionID);
-      printGenMdkStatisticsInfo(partitionID);
-      printHostBlockMapInfo();
-      printLoadSpeedInfo(partitionID);
-    } catch (Exception e) {
-      LOGGER.audit("Can't print Statistics Information");
-    } finally {
-      resetLoadStatistics();
-    }
-  }
-
-  //Reset the load statistics values
-  private void resetLoadStatistics() {
-    loadCsvfilesToDfStartTime = 0;
-    loadCsvfilesToDfCostTime = 0;
-    dicShuffleAndWriteFileTotalStartTime = 0;
-    dicShuffleAndWriteFileTotalCostTime = 0;
-    lruCacheLoadTime = 0;
-    totalRecords = 0;
-    totalTime = 0;
-    parDictionaryValuesTotalTimeMap.clear();
-    parCsvInputStepTimeMap.clear();
-    parSortRowsStepTotalTimeMap.clear();
-    parGeneratingDictionaryValuesTimeMap.clear();
-    parMdkGenerateTotalTimeMap.clear();
-    parDictionaryValue2MdkAdd2FileTime.clear();
-  }
-
-}
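
The EST/LET bookkeeping described in the comment near the top of the class can be captured in a small helper. The sketch below is illustrative (the class name is invented) and adds the synchronization that the raw long fields above do not have:

/**
 * Earliest-start/latest-end (EST/LET) timing: several threads report time
 * points for the same stage, and the stage cost is the span from the first
 * report to the latest one.
 */
final class StageTimer {
  private long earliestStart = 0;  // EST; 0 means no thread has reported yet
  private long maxSpan = 0;        // LET - EST observed so far

  synchronized void record() {
    long now = System.currentTimeMillis();
    if (earliestStart == 0) {
      earliestStart = now;            // first reporting thread fixes the EST
    }
    if (now - earliestStart > maxSpan) {
      maxSpan = now - earliestStart;  // later reports push the LET forward
    }
  }

  synchronized double costSeconds() {
    return maxSpan / 1000.0;
  }
}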

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/CarbonMergerUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/CarbonMergerUtil.java b/core/src/main/java/org/carbondata/core/util/CarbonMergerUtil.java
deleted file mode 100644
index e2cdf64..0000000
--- a/core/src/main/java/org/carbondata/core/util/CarbonMergerUtil.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.util;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-/**
- * Utility class for merge activities across two loads.
- */
-public class CarbonMergerUtil {
-
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(CarbonMergerUtil.class.getName());
-
-  public static int[] getCardinalityFromLevelMetadata(String path, String tableName) {
-    int[] localCardinality = null;
-    try {
-      localCardinality = CarbonUtil.getCardinalityFromLevelMetadataFile(
-          path + '/' + CarbonCommonConstants.LEVEL_METADATA_FILE + tableName + ".metadata");
-    } catch (CarbonUtilException e) {
-      LOGGER.error("Error occurred :: " + e.getMessage());
-    }
-
-    return localCardinality;
-  }
-
-}
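
A hypothetical call site for the utility above (the store path and table name below are placeholders for illustration only):

// Placeholder path and table name; not an actual store layout.
int[] cardinality =
    CarbonMergerUtil.getCardinalityFromLevelMetadata("/carbon/store/db/t1", "t1");
if (cardinality == null) {
  // The utility logs the CarbonUtilException and returns null when the
  // level metadata file cannot be read.
  throw new IllegalStateException("level metadata not available for merge");
}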

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/CarbonMetadataUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/CarbonMetadataUtil.java b/core/src/main/java/org/carbondata/core/util/CarbonMetadataUtil.java
deleted file mode 100644
index c20e88a..0000000
--- a/core/src/main/java/org/carbondata/core/util/CarbonMetadataUtil.java
+++ /dev/null
@@ -1,450 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.util;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.metadata.index.BlockIndexInfo;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.metadata.BlockletInfoColumnar;
-import org.carbondata.core.metadata.ValueEncoderMeta;
-import org.carbondata.format.BlockIndex;
-import org.carbondata.format.BlockletBTreeIndex;
-import org.carbondata.format.BlockletIndex;
-import org.carbondata.format.BlockletInfo;
-import org.carbondata.format.BlockletMinMaxIndex;
-import org.carbondata.format.ChunkCompressionMeta;
-import org.carbondata.format.ColumnSchema;
-import org.carbondata.format.CompressionCodec;
-import org.carbondata.format.DataChunk;
-import org.carbondata.format.Encoding;
-import org.carbondata.format.FileFooter;
-import org.carbondata.format.IndexHeader;
-import org.carbondata.format.PresenceMeta;
-import org.carbondata.format.SegmentInfo;
-import org.carbondata.format.SortState;
-
-/**
- * Utility class to convert metadata to Thrift classes.
- */
-public class CarbonMetadataUtil {
-
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(CarbonMetadataUtil.class.getName());
-
-  /**
-   * Converts a list of BlockletInfoColumnar objects to a FileFooter thrift object.
-   *
-   * @param infoList
-   * @param numCols
-   * @param cardinalities
-   * @return FileFooter
-   */
-  public static FileFooter convertFileFooter(List<BlockletInfoColumnar> infoList, int numCols,
-      int[] cardinalities, List<ColumnSchema> columnSchemaList,
-      SegmentProperties segmentProperties) throws IOException {
-
-    SegmentInfo segmentInfo = new SegmentInfo();
-    segmentInfo.setNum_cols(columnSchemaList.size());
-    segmentInfo.setColumn_cardinalities(CarbonUtil.convertToIntegerList(cardinalities));
-
-    FileFooter footer = new FileFooter();
-    footer.setNum_rows(getTotalNumberOfRows(infoList));
-    footer.setSegment_info(segmentInfo);
-    for (BlockletInfoColumnar info : infoList) {
-      footer.addToBlocklet_index_list(getBlockletIndex(info));
-    }
-    footer.setTable_columns(columnSchemaList);
-    for (BlockletInfoColumnar info : infoList) {
-      footer.addToBlocklet_info_list(getBlockletInfo(info, columnSchemaList, segmentProperties));
-    }
-    return footer;
-  }
-
-  private static BlockletIndex getBlockletIndex(
-      org.carbondata.core.carbon.metadata.blocklet.index.BlockletIndex info) {
-    BlockletMinMaxIndex blockletMinMaxIndex = new BlockletMinMaxIndex();
-
-    for (int i = 0; i < info.getMinMaxIndex().getMaxValues().length; i++) {
-      blockletMinMaxIndex.addToMax_values(ByteBuffer.wrap(info.getMinMaxIndex().getMaxValues()[i]));
-      blockletMinMaxIndex.addToMin_values(ByteBuffer.wrap(info.getMinMaxIndex().getMinValues()[i]));
-    }
-    BlockletBTreeIndex blockletBTreeIndex = new BlockletBTreeIndex();
-    blockletBTreeIndex.setStart_key(info.getBtreeIndex().getStartKey());
-    blockletBTreeIndex.setEnd_key(info.getBtreeIndex().getEndKey());
-    BlockletIndex blockletIndex = new BlockletIndex();
-    blockletIndex.setMin_max_index(blockletMinMaxIndex);
-    blockletIndex.setB_tree_index(blockletBTreeIndex);
-    return blockletIndex;
-  }
-
-  /**
-   * Get total number of rows for the file.
-   *
-   * @param infoList
-   * @return
-   */
-  private static long getTotalNumberOfRows(List<BlockletInfoColumnar> infoList) {
-    long numberOfRows = 0;
-    for (BlockletInfoColumnar info : infoList) {
-      numberOfRows += info.getNumberOfKeys();
-    }
-    return numberOfRows;
-  }
-
-  private static BlockletIndex getBlockletIndex(BlockletInfoColumnar info) {
-
-    BlockletMinMaxIndex blockletMinMaxIndex = new BlockletMinMaxIndex();
-    for (byte[] max : info.getColumnMaxData()) {
-      blockletMinMaxIndex.addToMax_values(ByteBuffer.wrap(max));
-    }
-    for (byte[] min : info.getColumnMinData()) {
-      blockletMinMaxIndex.addToMin_values(ByteBuffer.wrap(min));
-    }
-    BlockletBTreeIndex blockletBTreeIndex = new BlockletBTreeIndex();
-    blockletBTreeIndex.setStart_key(info.getStartKey());
-    blockletBTreeIndex.setEnd_key(info.getEndKey());
-
-    BlockletIndex blockletIndex = new BlockletIndex();
-    blockletIndex.setMin_max_index(blockletMinMaxIndex);
-    blockletIndex.setB_tree_index(blockletBTreeIndex);
-    return blockletIndex;
-  }
-
-  private static BlockletInfo getBlockletInfo(BlockletInfoColumnar blockletInfoColumnar,
-      List<ColumnSchema> columnSchemas,
-      SegmentProperties segmentProperties) throws IOException {
-
-    BlockletInfo blockletInfo = new BlockletInfo();
-    blockletInfo.setNum_rows(blockletInfoColumnar.getNumberOfKeys());
-
-    List<DataChunk> colDataChunks = new ArrayList<DataChunk>();
-    blockletInfoColumnar.getKeyLengths();
-    int j = 0;
-    int aggregateIndex = 0;
-    boolean[] isSortedKeyColumn = blockletInfoColumnar.getIsSortedKeyColumn();
-    boolean[] aggKeyBlock = blockletInfoColumnar.getAggKeyBlock();
-    boolean[] colGrpblock = blockletInfoColumnar.getColGrpBlocks();
-    for (int i = 0; i < blockletInfoColumnar.getKeyLengths().length; i++) {
-      DataChunk dataChunk = new DataChunk();
-      dataChunk.setChunk_meta(getChunkCompressionMeta());
-      List<Encoding> encodings = new ArrayList<Encoding>();
-      if (containsEncoding(i, Encoding.DICTIONARY, columnSchemas, segmentProperties)) {
-        encodings.add(Encoding.DICTIONARY);
-      }
-      if (containsEncoding(i, Encoding.DIRECT_DICTIONARY, columnSchemas, segmentProperties)) {
-        encodings.add(Encoding.DIRECT_DICTIONARY);
-      }
-      dataChunk.setRowMajor(colGrpblock[i]);
-      //TODO : Once the schema PR is merged, the column ids need to be passed here.
-      dataChunk.setColumn_ids(new ArrayList<Integer>());
-      dataChunk.setData_page_length(blockletInfoColumnar.getKeyLengths()[i]);
-      dataChunk.setData_page_offset(blockletInfoColumnar.getKeyOffSets()[i]);
-      if (aggKeyBlock[i]) {
-        dataChunk.setRle_page_offset(blockletInfoColumnar.getDataIndexMapOffsets()[aggregateIndex]);
-        dataChunk.setRle_page_length(blockletInfoColumnar.getDataIndexMapLength()[aggregateIndex]);
-        encodings.add(Encoding.RLE);
-        aggregateIndex++;
-      }
-      dataChunk
-          .setSort_state(isSortedKeyColumn[i] ? SortState.SORT_EXPLICIT : SortState.SORT_NATIVE);
-
-      if (!isSortedKeyColumn[i]) {
-        dataChunk.setRowid_page_offset(blockletInfoColumnar.getKeyBlockIndexOffSets()[j]);
-        dataChunk.setRowid_page_length(blockletInfoColumnar.getKeyBlockIndexLength()[j]);
-        encodings.add(Encoding.INVERTED_INDEX);
-        j++;
-      }
-
-      //TODO : Right now the encodings are decided at runtime; change to use these encoders.
-      dataChunk.setEncoders(encodings);
-
-      colDataChunks.add(dataChunk);
-    }
-
-    for (int i = 0; i < blockletInfoColumnar.getMeasureLength().length; i++) {
-      DataChunk dataChunk = new DataChunk();
-      dataChunk.setChunk_meta(getChunkCompressionMeta());
-      dataChunk.setRowMajor(false);
-      //TODO : Once the schema PR is merged, the column ids need to be passed here.
-      dataChunk.setColumn_ids(new ArrayList<Integer>());
-      dataChunk.setData_page_length(blockletInfoColumnar.getMeasureLength()[i]);
-      dataChunk.setData_page_offset(blockletInfoColumnar.getMeasureOffset()[i]);
-      //TODO : Right now the encodings are decided at runtime; change to use these encoders.
-      List<Encoding> encodings = new ArrayList<Encoding>();
-      encodings.add(Encoding.DELTA);
-      dataChunk.setEncoders(encodings);
-      //TODO : writing dummy presence meta; need to set the actual presence meta
-      PresenceMeta presenceMeta = new PresenceMeta();
-      presenceMeta.setPresent_bit_streamIsSet(true);
-      presenceMeta
-          .setPresent_bit_stream(blockletInfoColumnar.getMeasureNullValueIndex()[i].toByteArray());
-      dataChunk.setPresence(presenceMeta);
-      //TODO : PresenceMeta needs to be implemented and set here
-      // dataChunk.setPresence(new PresenceMeta());
-      //TODO : Need to write ValueCompression meta here.
-      List<ByteBuffer> encoderMetaList = new ArrayList<ByteBuffer>();
-      encoderMetaList.add(ByteBuffer.wrap(serializeEncoderMeta(
-          createValueEncoderMeta(blockletInfoColumnar.getCompressionModel(), i))));
-      dataChunk.setEncoder_meta(encoderMetaList);
-      colDataChunks.add(dataChunk);
-    }
-    blockletInfo.setColumn_data_chunks(colDataChunks);
-
-    return blockletInfo;
-  }
-
-  /**
-   * @param blockIndex
-   * @param encoding
-   * @param columnSchemas
-   * @param segmentProperties
-   * @return true if the given encoding is present in the column
-   */
-  private static boolean containsEncoding(int blockIndex, Encoding encoding,
-      List<ColumnSchema> columnSchemas, SegmentProperties segmentProperties) {
-    Set<Integer> dimOrdinals = segmentProperties.getDimensionOrdinalForBlock(blockIndex);
-    //column groups will always have dictionary encoding
-    if (dimOrdinals.size() > 1 && Encoding.DICTIONARY == encoding) {
-      return true;
-    }
-    for (Integer dimOrdinal : dimOrdinals) {
-      if (columnSchemas.get(dimOrdinal).encoders.contains(encoding)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  private static byte[] serializeEncoderMeta(ValueEncoderMeta encoderMeta) throws IOException {
-    // TODO : should remove the unnecessary fields.
-    ByteArrayOutputStream aos = new ByteArrayOutputStream();
-    ObjectOutputStream objStream = new ObjectOutputStream(aos);
-    objStream.writeObject(encoderMeta);
-    objStream.close();
-    return aos.toByteArray();
-  }
-
-  private static ValueEncoderMeta createValueEncoderMeta(ValueCompressionModel compressionModel,
-      int index) {
-    ValueEncoderMeta encoderMeta = new ValueEncoderMeta();
-    encoderMeta.setMaxValue(compressionModel.getMaxValue()[index]);
-    encoderMeta.setMinValue(compressionModel.getMinValue()[index]);
-    encoderMeta.setDataTypeSelected(compressionModel.getDataTypeSelected()[index]);
-    encoderMeta.setDecimal(compressionModel.getDecimal()[index]);
-    encoderMeta.setType(compressionModel.getType()[index]);
-    encoderMeta.setUniqueValue(compressionModel.getUniqueValue()[index]);
-    return encoderMeta;
-  }
-
-  /**
-   * Right now it is set to default values. We may use this in the future.
-   */
-  private static ChunkCompressionMeta getChunkCompressionMeta() {
-    ChunkCompressionMeta chunkCompressionMeta = new ChunkCompressionMeta();
-    chunkCompressionMeta.setCompression_codec(CompressionCodec.SNAPPY);
-    chunkCompressionMeta.setTotal_compressed_size(0);
-    chunkCompressionMeta.setTotal_uncompressed_size(0);
-    return chunkCompressionMeta;
-  }
-
-  /**
-   * Converts a FileFooter thrift object to a list of BlockletInfoColumnar objects.
-   *
-   * @param footer
-   * @return
-   */
-  public static List<BlockletInfoColumnar> convertBlockletInfo(FileFooter footer)
-      throws IOException {
-    List<BlockletInfoColumnar> listOfNodeInfo =
-        new ArrayList<BlockletInfoColumnar>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-    for (BlockletInfo blockletInfo : footer.getBlocklet_info_list()) {
-      BlockletInfoColumnar blockletInfoColumnar = new BlockletInfoColumnar();
-      blockletInfoColumnar.setNumberOfKeys(blockletInfo.getNum_rows());
-      List<DataChunk> columnChunks = blockletInfo.getColumn_data_chunks();
-      List<DataChunk> dictChunks = new ArrayList<DataChunk>();
-      List<DataChunk> nonDictColChunks = new ArrayList<DataChunk>();
-      for (DataChunk dataChunk : columnChunks) {
-        if (dataChunk.getEncoders().get(0).equals(Encoding.DICTIONARY)) {
-          dictChunks.add(dataChunk);
-        } else {
-          nonDictColChunks.add(dataChunk);
-        }
-      }
-      int[] keyLengths = new int[dictChunks.size()];
-      long[] keyOffSets = new long[dictChunks.size()];
-      long[] keyBlockIndexOffsets = new long[dictChunks.size()];
-      int[] keyBlockIndexLens = new int[dictChunks.size()];
-      long[] indexMapOffsets = new long[dictChunks.size()];
-      int[] indexMapLens = new int[dictChunks.size()];
-      boolean[] sortState = new boolean[dictChunks.size()];
-      int i = 0;
-      for (DataChunk dataChunk : dictChunks) {
-        keyLengths[i] = dataChunk.getData_page_length();
-        keyOffSets[i] = dataChunk.getData_page_offset();
-        keyBlockIndexOffsets[i] = dataChunk.getRowid_page_offset();
-        keyBlockIndexLens[i] = dataChunk.getRowid_page_length();
-        indexMapOffsets[i] = dataChunk.getRle_page_offset();
-        indexMapLens[i] = dataChunk.getRle_page_length();
-        sortState[i] = dataChunk.getSort_state().equals(SortState.SORT_EXPLICIT);
-        i++;
-      }
-      blockletInfoColumnar.setKeyLengths(keyLengths);
-      blockletInfoColumnar.setKeyOffSets(keyOffSets);
-      blockletInfoColumnar.setKeyBlockIndexOffSets(keyBlockIndexOffsets);
-      blockletInfoColumnar.setKeyBlockIndexLength(keyBlockIndexLens);
-      blockletInfoColumnar.setDataIndexMapOffsets(indexMapOffsets);
-      blockletInfoColumnar.setDataIndexMapLength(indexMapLens);
-      blockletInfoColumnar.setIsSortedKeyColumn(sortState);
-
-      int[] msrLens = new int[nonDictColChunks.size()];
-      long[] msrOffsets = new long[nonDictColChunks.size()];
-      ValueEncoderMeta[] encoderMetas = new ValueEncoderMeta[nonDictColChunks.size()];
-      i = 0;
-      for (DataChunk msrChunk : nonDictColChunks) {
-        msrLens[i] = msrChunk.getData_page_length();
-        msrOffsets[i] = msrChunk.getData_page_offset();
-        encoderMetas[i] = deserializeValueEncoderMeta(msrChunk.getEncoder_meta().get(0));
-        i++;
-      }
-      blockletInfoColumnar.setMeasureLength(msrLens);
-      blockletInfoColumnar.setMeasureOffset(msrOffsets);
-      blockletInfoColumnar.setCompressionModel(getValueCompressionModel(encoderMetas));
-      listOfNodeInfo.add(blockletInfoColumnar);
-    }
-
-    setBlockletIndex(footer, listOfNodeInfo);
-    return listOfNodeInfo;
-  }
-
-  private static ValueEncoderMeta deserializeValueEncoderMeta(ByteBuffer byteBuffer)
-      throws IOException {
-    ByteArrayInputStream bis = new ByteArrayInputStream(byteBuffer.array());
-    ObjectInputStream objStream = new ObjectInputStream(bis);
-    ValueEncoderMeta encoderMeta = null;
-    try {
-      encoderMeta = (ValueEncoderMeta) objStream.readObject();
-    } catch (ClassNotFoundException e) {
-      LOGGER.error("Error while reading ValueEncoderMeta");
-    }
-    return encoderMeta;
-
-  }
-
-  private static ValueCompressionModel getValueCompressionModel(ValueEncoderMeta[] encoderMetas) {
-    Object[] maxValue = new Object[encoderMetas.length];
-    Object[] minValue = new Object[encoderMetas.length];
-    int[] decimalLength = new int[encoderMetas.length];
-    Object[] uniqueValue = new Object[encoderMetas.length];
-    char[] aggType = new char[encoderMetas.length];
-    byte[] dataTypeSelected = new byte[encoderMetas.length];
-    for (int i = 0; i < encoderMetas.length; i++) {
-      maxValue[i] = encoderMetas[i].getMaxValue();
-      minValue[i] = encoderMetas[i].getMinValue();
-      decimalLength[i] = encoderMetas[i].getDecimal();
-      uniqueValue[i] = encoderMetas[i].getUniqueValue();
-      aggType[i] = encoderMetas[i].getType();
-      dataTypeSelected[i] = encoderMetas[i].getDataTypeSelected();
-    }
-    return ValueCompressionUtil
-        .getValueCompressionModel(maxValue, minValue, decimalLength, uniqueValue, aggType,
-            dataTypeSelected);
-  }
-
-  private static void setBlockletIndex(FileFooter footer,
-      List<BlockletInfoColumnar> listOfNodeInfo) {
-    List<BlockletIndex> blockletIndexList = footer.getBlocklet_index_list();
-    for (int i = 0; i < blockletIndexList.size(); i++) {
-      BlockletBTreeIndex bTreeIndexList = blockletIndexList.get(i).getB_tree_index();
-      BlockletMinMaxIndex minMaxIndexList = blockletIndexList.get(i).getMin_max_index();
-
-      listOfNodeInfo.get(i).setStartKey(bTreeIndexList.getStart_key());
-      listOfNodeInfo.get(i).setEndKey(bTreeIndexList.getEnd_key());
-      byte[][] min = new byte[minMaxIndexList.getMin_values().size()][];
-      byte[][] max = new byte[minMaxIndexList.getMax_values().size()][];
-      for (int j = 0; j < minMaxIndexList.getMax_valuesSize(); j++) {
-        min[j] = minMaxIndexList.getMin_values().get(j).array();
-        max[j] = minMaxIndexList.getMax_values().get(j).array();
-      }
-      listOfNodeInfo.get(i).setColumnMaxData(max);
-      listOfNodeInfo.get(i).setColumnMinData(min);
-    }
-  }
-
-  /**
-   * Below method will be used to get the index header
-   *
-   * @param columnCardinality cardinality of each column
-   * @param columnSchemaList  list of column present in the table
-   * @return Index header object
-   */
-  public static IndexHeader getIndexHeader(int[] columnCardinality,
-      List<ColumnSchema> columnSchemaList) {
-    // create segment info object
-    SegmentInfo segmentInfo = new SegmentInfo();
-    // set the number of columns
-    segmentInfo.setNum_cols(columnSchemaList.size());
-    // setting the column cardinality
-    segmentInfo.setColumn_cardinalities(CarbonUtil.convertToIntegerList(columnCardinality));
-    // create index header object
-    IndexHeader indexHeader = new IndexHeader();
-    // set the segment info
-    indexHeader.setSegment_info(segmentInfo);
-    // set the column names
-    indexHeader.setTable_columns(columnSchemaList);
-    return indexHeader;
-  }
-
-  /**
-   * Below method will be used to get the block index info thrift object for each block
-   * present in the segment
-   *
-   * @param blockIndexInfoList block index info list
-   * @return list of block index
-   */
-  public static List<BlockIndex> getBlockIndexInfo(List<BlockIndexInfo> blockIndexInfoList) {
-    List<BlockIndex> thriftBlockIndexList = new ArrayList<BlockIndex>();
-    BlockIndex blockIndex = null;
-    // below code to create block index info object for each block
-    for (BlockIndexInfo blockIndexInfo : blockIndexInfoList) {
-      blockIndex = new BlockIndex();
-      blockIndex.setNum_rows(blockIndexInfo.getNumberOfRows());
-      blockIndex.setOffset(blockIndexInfo.getOffset());
-      blockIndex.setFile_name(blockIndexInfo.getFileName());
-      blockIndex.setBlock_index(getBlockletIndex(blockIndexInfo.getBlockletIndex()));
-      thriftBlockIndexList.add(blockIndex);
-    }
-    return thriftBlockIndexList;
-  }
-}
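
serializeEncoderMeta and deserializeValueEncoderMeta above are a plain java.io object-serialization round trip. The self-contained sketch below demonstrates the same mechanism for any Serializable payload (class and method names here are invented for illustration):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

final class SerializationRoundTrip {

  /** Writes any Serializable to bytes, as serializeEncoderMeta does for ValueEncoderMeta. */
  static byte[] toBytes(Serializable value) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
      out.writeObject(value);
    }
    return bos.toByteArray();
  }

  /** Reads the object back, as deserializeValueEncoderMeta does from the thrift ByteBuffer. */
  static Object fromBytes(byte[] bytes) throws IOException, ClassNotFoundException {
    try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
      return in.readObject();
    }
  }
}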

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/CarbonProperties.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/CarbonProperties.java b/core/src/main/java/org/carbondata/core/util/CarbonProperties.java
deleted file mode 100644
index c6a3a1b..0000000
--- a/core/src/main/java/org/carbondata/core/util/CarbonProperties.java
+++ /dev/null
@@ -1,494 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.util;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.Properties;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-public final class CarbonProperties {
-  /**
-   * Attribute for Carbon LOGGER.
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(CarbonProperties.class.getName());
-
-  /**
-   * class instance.
-   */
-  private static final CarbonProperties CARBONPROPERTIESINSTANCE = new CarbonProperties();
-
-  /**
-   * carbon properties.
-   */
-  private Properties carbonProperties;
-
-  /**
-   * Private constructor. This will call the load properties method to load all
-   * the carbon properties into memory.
-   */
-  private CarbonProperties() {
-    carbonProperties = new Properties();
-    loadProperties();
-    validateAndLoadDefaultProperties();
-  }
-
-  /**
-   * This method is responsible for returning this class instance.
-   *
-   * @return carbon properties instance
-   */
-  public static CarbonProperties getInstance() {
-    return CARBONPROPERTIESINSTANCE;
-  }
-
-  /**
-   * This method validates the loaded properties and loads default
-   * values in case of wrong values.
-   */
-  private void validateAndLoadDefaultProperties() {
-    if (null == carbonProperties.getProperty(CarbonCommonConstants.STORE_LOCATION)) {
-      carbonProperties.setProperty(CarbonCommonConstants.STORE_LOCATION,
-          CarbonCommonConstants.STORE_LOCATION_DEFAULT_VAL);
-    }
-
-    validateBlockletSize();
-    validateMaxFileSize();
-    validateNumCores();
-    validateNumCoresBlockSort();
-    validateSortSize();
-    validateBadRecordsLocation();
-    validateHighCardinalityIdentify();
-    validateHighCardinalityThreshold();
-    validateHighCardinalityInRowCountPercentage();
-  }
-
-  private void validateBadRecordsLocation() {
-    String badRecordsLocation =
-        carbonProperties.getProperty(CarbonCommonConstants.CARBON_BADRECORDS_LOC);
-    if (null == badRecordsLocation || badRecordsLocation.length() == 0) {
-      carbonProperties.setProperty(CarbonCommonConstants.CARBON_BADRECORDS_LOC,
-          CarbonCommonConstants.CARBON_BADRECORDS_LOC_DEFAULT_VAL);
-    }
-  }
-
-  /**
-   * This method validates the blocklet size
-   */
-  private void validateBlockletSize() {
-    String blockletSizeStr = carbonProperties.getProperty(CarbonCommonConstants.BLOCKLET_SIZE,
-        CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL);
-    try {
-      int blockletSize = Integer.parseInt(blockletSizeStr);
-
-      if (blockletSize < CarbonCommonConstants.BLOCKLET_SIZE_MIN_VAL
-          || blockletSize > CarbonCommonConstants.BLOCKLET_SIZE_MAX_VAL) {
-        LOGGER.info("The blocklet size value \"" + blockletSizeStr
-                + "\" is invalid. Using the default value \""
-                + CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL);
-        carbonProperties.setProperty(CarbonCommonConstants.BLOCKLET_SIZE,
-            CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The blocklet size value \"" + blockletSizeStr
-              + "\" is invalid. Using the default value \""
-              + CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL);
-      carbonProperties.setProperty(CarbonCommonConstants.BLOCKLET_SIZE,
-          CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL);
-    }
-  }
-
-  /**
-   * TODO: This method validates the maximum number of blocklets per file?
-   */
-  private void validateMaxFileSize() {
-    String maxFileSizeStr = carbonProperties.getProperty(CarbonCommonConstants.MAX_FILE_SIZE,
-        CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL);
-    try {
-      int maxFileSize = Integer.parseInt(maxFileSizeStr);
-
-      if (maxFileSize < CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL_MIN_VAL
-          || maxFileSize > CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL_MAX_VAL) {
-        LOGGER.info("The max file size value \"" + maxFileSizeStr
-                + "\" is invalid. Using the default value \""
-                + CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL);
-        carbonProperties.setProperty(CarbonCommonConstants.MAX_FILE_SIZE,
-            CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The max file size value \"" + maxFileSizeStr
-              + "\" is invalid. Using the default value \""
-              + CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL);
-
-      carbonProperties.setProperty(CarbonCommonConstants.MAX_FILE_SIZE,
-          CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL);
-    }
-  }
-
-  /**
-   * This method validates the number cores specified
-   */
-  private void validateNumCores() {
-    String numCoresStr = carbonProperties
-        .getProperty(CarbonCommonConstants.NUM_CORES, CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
-    try {
-      int numCores = Integer.parseInt(numCoresStr);
-
-      if (numCores < CarbonCommonConstants.NUM_CORES_MIN_VAL
-          || numCores > CarbonCommonConstants.NUM_CORES_MAX_VAL) {
-        LOGGER.info("The num Cores  value \"" + numCoresStr
-            + "\" is invalid. Using the default value \""
-            + CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
-        carbonProperties.setProperty(CarbonCommonConstants.NUM_CORES,
-            CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The num Cores  value \"" + numCoresStr
-          + "\" is invalid. Using the default value \""
-          + CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
-      carbonProperties.setProperty(CarbonCommonConstants.NUM_CORES,
-          CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
-    }
-  }
-
-  /**
-   * This method validates the number cores specified for mdk block sort
-   */
-  private void validateNumCoresBlockSort() {
-    String numCoresStr = carbonProperties
-        .getProperty(CarbonCommonConstants.NUM_CORES_BLOCK_SORT,
-            CarbonCommonConstants.NUM_CORES_BLOCK_SORT_DEFAULT_VAL);
-    try {
-      int numCores = Integer.parseInt(numCoresStr);
-
-      if (numCores < CarbonCommonConstants.NUM_CORES_BLOCK_SORT_MIN_VAL
-          || numCores > CarbonCommonConstants.NUM_CORES_BLOCK_SORT_MAX_VAL) {
-        LOGGER.info("The num cores value \"" + numCoresStr
-            + "\" for block sort is invalid. Using the default value \""
-            + CarbonCommonConstants.NUM_CORES_BLOCK_SORT_DEFAULT_VAL);
-        carbonProperties.setProperty(CarbonCommonConstants.NUM_CORES_BLOCK_SORT,
-            CarbonCommonConstants.NUM_CORES_BLOCK_SORT_DEFAULT_VAL);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The num cores value \"" + numCoresStr
-          + "\" for block sort is invalid. Using the default value \""
-          + CarbonCommonConstants.NUM_CORES_BLOCK_SORT_DEFAULT_VAL);
-      carbonProperties.setProperty(CarbonCommonConstants.NUM_CORES_BLOCK_SORT,
-          CarbonCommonConstants.NUM_CORES_BLOCK_SORT_DEFAULT_VAL);
-    }
-  }
-
-  /**
-   * This method validates the sort size
-   */
-  private void validateSortSize() {
-    String sortSizeStr = carbonProperties
-        .getProperty(CarbonCommonConstants.SORT_SIZE, CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL);
-    try {
-      int sortSize = Integer.parseInt(sortSizeStr);
-
-      if (sortSize < CarbonCommonConstants.SORT_SIZE_MIN_VAL) {
-        LOGGER.info("The batch size value \"" + sortSizeStr
-            + "\" is invalid. Using the default value \""
-            + CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL);
-        carbonProperties.setProperty(CarbonCommonConstants.SORT_SIZE,
-            CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The batch size value \"" + sortSizeStr
-          + "\" is invalid. Using the default value \""
-          + CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL);
-      carbonProperties.setProperty(CarbonCommonConstants.SORT_SIZE,
-          CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL);
-    }
-  }
-
-  private void validateHighCardinalityIdentify() {
-    String highcardIdentifyStr = carbonProperties.getProperty(
-        CarbonCommonConstants.HIGH_CARDINALITY_IDENTIFY_ENABLE,
-        CarbonCommonConstants.HIGH_CARDINALITY_IDENTIFY_ENABLE_DEFAULT);
-    // Boolean.parseBoolean never throws, so the raw value has to be checked explicitly
-    if (!"true".equalsIgnoreCase(highcardIdentifyStr)
-        && !"false".equalsIgnoreCase(highcardIdentifyStr)) {
-      LOGGER.info("The high cardinality identify value \"" + highcardIdentifyStr
-          + "\" is invalid. Using the default value \""
-          + CarbonCommonConstants.HIGH_CARDINALITY_IDENTIFY_ENABLE_DEFAULT + "\"");
-      carbonProperties.setProperty(CarbonCommonConstants.HIGH_CARDINALITY_IDENTIFY_ENABLE,
-          CarbonCommonConstants.HIGH_CARDINALITY_IDENTIFY_ENABLE_DEFAULT);
-    }
-  }
-
-  private void validateHighCardinalityThreshold() {
-    String highcardThresholdStr = carbonProperties.getProperty(
-        CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD,
-        CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD_DEFAULT);
-    try {
-      int highcardThreshold = Integer.parseInt(highcardThresholdStr);
-      if (highcardThreshold < CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD_MIN) {
-        LOGGER.info("The high cardinality threshold value \"" + highcardThresholdStr
-            + "\" is invalid. Using the min value \""
-            + CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD_MIN + "\"");
-        carbonProperties.setProperty(CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD,
-            CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD_MIN + "");
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The high cardinality threshold value \"" + highcardThresholdStr
-          + "\" is invalid. Using the default value \""
-          + CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD_DEFAULT + "\"");
-      carbonProperties.setProperty(CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD,
-          CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD_DEFAULT);
-    }
-  }
-
-  private void validateHighCardinalityInRowCountPercentage() {
-    String highcardPercentageStr = carbonProperties.getProperty(
-        CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE,
-        CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE_DEFAULT);
-    try {
-      double highcardPercentage = Double.parseDouble(highcardPercentageStr);
-      if (highcardPercentage <= 0) {
-        LOGGER.info("The percentage of high cardinality in row count value \""
-            + highcardPercentageStr + "\" is invalid. Using the default value \""
-            + CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE_DEFAULT + "\"");
-        carbonProperties.setProperty(
-            CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE,
-            CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE_DEFAULT);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The percentage of high cardinality in row count value \""
-          + highcardPercentageStr + "\" is invalid. Using the default value \""
-          + CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE_DEFAULT + "\"");
-      carbonProperties.setProperty(CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE,
-          CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE_DEFAULT);
-    }
-  }
-
-  /**
-   * This method will read all the properties from the file and load them
-   * into memory
-   */
-  private void loadProperties() {
-    String property = System.getProperty("carbon.properties.filepath");
-    if (null == property) {
-      property = CarbonCommonConstants.CARBON_PROPERTIES_FILE_PATH;
-    }
-    File file = new File(property);
-    LOGGER.info("Property file path: " + file.getAbsolutePath());
-
-    FileInputStream fis = null;
-    try {
-      if (file.exists()) {
-        fis = new FileInputStream(file);
-
-        carbonProperties.load(fis);
-      }
-    } catch (FileNotFoundException e) {
-      LOGGER.error("The file: " + CarbonCommonConstants.CARBON_PROPERTIES_FILE_PATH
-          + " does not exist");
-    } catch (IOException e) {
-      LOGGER.error("Error while reading the file: "
-          + CarbonCommonConstants.CARBON_PROPERTIES_FILE_PATH);
-    } finally {
-      if (null != fis) {
-        try {
-          fis.close();
-        } catch (IOException e) {
-          LOGGER.error("Error while closing the file stream for file: "
-                  + CarbonCommonConstants.CARBON_PROPERTIES_FILE_PATH);
-        }
-      }
-    }
-
-    print();
-  }
-
-  /**
-   * This method will be used to get the properties value
-   *
-   * @param key
-   * @return properties value
-   */
-  public String getProperty(String key) {
-    //TODO temporary fix
-    if ("carbon.leaf.node.size".equals(key)) {
-      return "120000";
-    }
-    return carbonProperties.getProperty(key);
-  }
-
-  /**
-   * This method will be used to get the properties value; if the property is
-   * not present then it will return the default value
-   *
-   * @param key          property key
-   * @param defaultValue value to return when the property is not set
-   * @return properties value
-   */
-  public String getProperty(String key, String defaultValue) {
-    String value = getProperty(key);
-    if (null == value) {
-      return defaultValue;
-    }
-    return value;
-  }
-
-  /**
-   * This method will be used to add a new property
-   *
-   * @param key   property key
-   * @param value property value
-   */
-  public void addProperty(String key, String value) {
-    carbonProperties.setProperty(key, value);
-  }
-
-  /**
-   * Validate a value against the allowed range.
-   *
-   * @param actual     value to validate
-   * @param max        maximum allowed value
-   * @param min        minimum allowed value
-   * @param defaultVal value to return when actual is out of range
-   * @return actual if it lies within [min, max], otherwise defaultVal
-   */
-  public long validate(long actual, long max, long min, long defaultVal) {
-    if (actual <= max && actual >= min) {
-      return actual;
-    }
-    return defaultVal;
-  }
-
-  /**
-   * Returns the major compaction size from carbon properties, or the default
-   * value when the configured value is not a valid long.
-   *
-   * @return major compaction size
-   */
-  public long getMajorCompactionSize() {
-    long compactionSize;
-    try {
-      compactionSize = Long.parseLong(getProperty(CarbonCommonConstants.MAJOR_COMPACTION_SIZE,
-          CarbonCommonConstants.DEFAULT_MAJOR_COMPACTION_SIZE));
-    } catch (NumberFormatException e) {
-      compactionSize = Long.parseLong(CarbonCommonConstants.DEFAULT_MAJOR_COMPACTION_SIZE);
-    }
-    return compactionSize;
-  }
-
-  /**
-   * Returns the number of latest segments (loads) to be preserved from compaction.
-   *
-   * @return number of segments to be preserved
-   */
-  public int getNumberOfSegmentsToBePreserved() {
-    int numberOfSegmentsToBePreserved;
-    try {
-      numberOfSegmentsToBePreserved = Integer.parseInt(
-          getProperty(CarbonCommonConstants.PRESERVE_LATEST_SEGMENTS_NUMBER,
-              CarbonCommonConstants.DEFAULT_PRESERVE_LATEST_SEGMENTS_NUMBER));
-      // the valid range is 0 to 100
-      if (numberOfSegmentsToBePreserved < 0 || numberOfSegmentsToBePreserved > 100) {
-        LOGGER.error("The specified value for property "
-            + CarbonCommonConstants.PRESERVE_LATEST_SEGMENTS_NUMBER + " is incorrect."
-            + " Correct value should be in the range of 0 - 100. Taking the default value.");
-        numberOfSegmentsToBePreserved =
-            Integer.parseInt(CarbonCommonConstants.DEFAULT_PRESERVE_LATEST_SEGMENTS_NUMBER);
-      }
-    } catch (NumberFormatException e) {
-      numberOfSegmentsToBePreserved =
-          Integer.parseInt(CarbonCommonConstants.DEFAULT_PRESERVE_LATEST_SEGMENTS_NUMBER);
-    }
-    return numberOfSegmentsToBePreserved;
-  }
-
-  public void print() {
-    LOGGER.info("------Using Carbon.properties --------");
-    LOGGER.info(carbonProperties.toString());
-  }
-
-  /**
-   * Gets the number of unmerged segments at each level that triggers compaction.
-   *
-   * @return per-level segment count thresholds
-   */
-  public int[] getCompactionSegmentLevelCount() {
-    String commaSeparatedLevels;
-
-    commaSeparatedLevels = getProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD,
-        CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD);
-    int[] compactionSize = getIntArray(commaSeparatedLevels);
-
-    if (null == compactionSize) {
-      compactionSize = getIntArray(CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD);
-    }
-
-    return compactionSize;
-  }
-
-  /**
-   * Parses a comma separated list of segment-level thresholds into an int array.
-   *
-   * @param commaSeparatedLevels comma separated threshold values
-   * @return parsed thresholds, or null when any value is invalid or out of range
-   */
-  private int[] getIntArray(String commaSeparatedLevels) {
-    String[] levels = commaSeparatedLevels.split(",");
-    int[] compactionSize = new int[levels.length];
-    int i = 0;
-    for (String levelSize : levels) {
-      try {
-        int size = Integer.parseInt(levelSize.trim());
-        if (validate(size, 100, 0, -1) < 0) {
-          // if the given size is out of bounds, take the default value for all levels
-          return null;
-        }
-        compactionSize[i++] = size;
-      } catch (NumberFormatException e) {
-        LOGGER.error(
-            "Given value for property " + CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD
-                + " is not valid. Taking the default value "
-                + CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD);
-        return null;
-      }
-    }
-    return compactionSize;
-  }
-
-  /**
-   * Validate a value against the allowed range.
-   *
-   * @param actual     value to validate
-   * @param max        maximum allowed value
-   * @param min        minimum allowed value
-   * @param defaultVal value to return when actual is out of range
-   * @return actual if it lies within [min, max], otherwise defaultVal
-   */
-  public int validate(int actual, int max, int min, int defaultVal) {
-    if (actual <= max && actual >= min) {
-      return actual;
-    }
-    return defaultVal;
-  }
-
-}
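
The validation methods in this class all follow the same parse-then-clamp pattern:
read the raw property, try to parse it, and fall back to the default when the value
is missing, malformed, or out of range. A minimal standalone sketch of that pattern
(plain Java with hypothetical names; it does not depend on the CarbonData classes
removed in this commit):

import java.util.Properties;

public final class RangeValidatedProperty {
  // hypothetical bounds, standing in for NUM_CORES_MIN_VAL / NUM_CORES_MAX_VAL
  private static final int MIN = 1;
  private static final int MAX = 32;
  private static final String DEFAULT = "2";

  /** Parses the property, resetting it to the default when invalid or out of range. */
  public static int resolve(Properties props, String key) {
    String raw = props.getProperty(key, DEFAULT);
    try {
      int value = Integer.parseInt(raw);
      if (value >= MIN && value <= MAX) {
        return value;
      }
    } catch (NumberFormatException ignored) {
      // fall through and apply the default below
    }
    props.setProperty(key, DEFAULT);
    return Integer.parseInt(DEFAULT);
  }

  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("carbon.example.cores", "999"); // out of range
    System.out.println(resolve(props, "carbon.example.cores")); // prints 2
  }
}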

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/CarbonTimeStatisticsFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/CarbonTimeStatisticsFactory.java b/core/src/main/java/org/carbondata/core/util/CarbonTimeStatisticsFactory.java
deleted file mode 100644
index a5c67ff..0000000
--- a/core/src/main/java/org/carbondata/core/util/CarbonTimeStatisticsFactory.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.carbondata.core.util;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-public class CarbonTimeStatisticsFactory {
-  private static String loadStatisticsInstanceType;
-  private static LoadStatistics loadStatisticsInstance;
-
-  static {
-    updateTimeStatisticsUtilStatus();
-    loadStatisticsInstance = genLoadStatisticsInstance();
-  }
-
-  private static void updateTimeStatisticsUtilStatus() {
-    loadStatisticsInstanceType = CarbonProperties.getInstance()
-        .getProperty(CarbonCommonConstants.ENABLE_DATA_LOADING_STATISTICS,
-            CarbonCommonConstants.ENABLE_DATA_LOADING_STATISTICS_DEFAULT);
-  }
-
-  private static LoadStatistics genLoadStatisticsInstance() {
-    switch (loadStatisticsInstanceType.toLowerCase()) {
-      case "true":
-        return CarbonLoadStatisticsImpl.getInstance();
-      case "false":
-      default:
-        return CarbonLoadStatisticsDummy.getInstance();
-    }
-  }
-
-  public static LoadStatistics getLoadStatisticsInstance() {
-    return loadStatisticsInstance;
-  }
-
-}
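
CarbonTimeStatisticsFactory above resolves a boolean property once, at class-load
time, and hands out one of two singletons ever after. A compact standalone sketch of
the same idea (hypothetical interface and names, not the CarbonData API):

public final class StatsFactory {
  /** Minimal stand-in for the LoadStatistics interface. */
  interface Stats { void record(long millis); }

  private static final Stats NO_OP = millis -> { };                // like the Dummy impl
  private static final Stats LOGGING =
      millis -> System.out.println("load took " + millis + " ms"); // like the real impl

  // resolved once, mirroring the static initializer above
  private static final Stats INSTANCE =
      Boolean.parseBoolean(System.getProperty("stats.enabled", "false")) ? LOGGING : NO_OP;

  private StatsFactory() { }

  public static Stats get() { return INSTANCE; }
}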


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneLong.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneLong.java
deleted file mode 100644
index d4b3ce5..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneLong.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public class UnCompressNoneLong implements ValueCompressonHolder.UnCompressValue<long[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNoneLong.class.getName());
-  /**
-   * longCompressor.
-   */
-  private static Compressor<long[]> longCompressor =
-      SnappyCompression.SnappyLongCompression.INSTANCE;
-  /**
-   * value.
-   */
-  protected long[] value;
-
-  @Override public void setValue(long[] value) {
-    this.value = value;
-
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException clnNotSupportedExc) {
-      LOGGER.error(clnNotSupportedExc,
-          clnNotSupportedExc.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressNoneByte byte1 = new UnCompressNoneByte();
-    byte1.setValue(longCompressor.compress(value));
-    return byte1;
-
-  }
-
-  @Override
-  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dType) {
-    return null;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public void setValueInBytes(byte[] byteValue) {
-    ByteBuffer buffer = ByteBuffer.wrap(byteValue);
-    this.value = ValueCompressionUtil.convertToLongArray(buffer, byteValue.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressNoneByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    double[] vals = new double[value.length];
-    for (int i = 0; i < vals.length; i++) {
-      vals[i] = value[i];
-    }
-    dataHolder.setReadableDoubleValues(vals);
-    return dataHolder;
-  }
-
-}
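
The getBackArrayData/setValueInBytes pair above round-trips a long[] through a
byte[]; the ValueCompressionUtil helpers presumably perform a conversion much like
this self-contained sketch using only java.nio:

import java.nio.ByteBuffer;
import java.util.Arrays;

public final class LongBytesRoundTrip {
  static byte[] toBytes(long[] values) {
    ByteBuffer buffer = ByteBuffer.allocate(values.length * Long.BYTES);
    for (long v : values) {
      buffer.putLong(v);
    }
    return buffer.array();
  }

  static long[] toLongs(byte[] bytes) {
    ByteBuffer buffer = ByteBuffer.wrap(bytes);
    long[] values = new long[bytes.length / Long.BYTES];
    for (int i = 0; i < values.length; i++) {
      values[i] = buffer.getLong();
    }
    return values;
  }

  public static void main(String[] args) {
    long[] original = {1L, -2L, 300L};
    System.out.println(Arrays.toString(toLongs(toBytes(original)))); // [1, -2, 300]
  }
}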

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneShort.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneShort.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneShort.java
deleted file mode 100644
index d0b6cc2..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneShort.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressNoneShort implements ValueCompressonHolder.UnCompressValue<short[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNoneShort.class.getName());
-
-  /**
-   * shortCompressor.
-   */
-  private static Compressor<short[]> shortCompressor =
-      SnappyCompression.SnappyShortCompression.INSTANCE;
-
-  /**
-   * value.
-   */
-  private short[] shortValue;
-
-  @Override public void setValue(short[] shortValue) {
-    this.shortValue = shortValue;
-
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException cns1) {
-      LOGGER.error(cns1, cns1.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-
-    UnCompressNoneByte byte1 = new UnCompressNoneByte();
-    byte1.setValue(shortCompressor.compress(shortValue));
-
-    return byte1;
-
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
-    return null;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(shortValue);
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    shortValue = ValueCompressionUtil.convertToShortArray(buffer, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressNoneByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    double[] vals = new double[shortValue.length];
-    for (int i = 0; i < vals.length; i++) {
-      vals[i] = shortValue[i];
-    }
-    dataHolder.setReadableDoubleValues(vals);
-    return dataHolder;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/dataholder/CarbonReadDataHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/dataholder/CarbonReadDataHolder.java b/core/src/main/java/org/carbondata/core/datastorage/store/dataholder/CarbonReadDataHolder.java
deleted file mode 100644
index a297b0b..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/dataholder/CarbonReadDataHolder.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.dataholder;
-
-import java.math.BigDecimal;
-
-public class CarbonReadDataHolder {
-
-  /**
-   * doubleValues
-   */
-  private double[] doubleValues;
-
-  /**
-   * longValues
-   */
-  private long[] longValues;
-
-  /**
-   * bigDecimalValues
-   */
-  private BigDecimal[] bigDecimalValues;
-
-  /**
-   * byteValues
-   */
-  private byte[][] byteValues;
-
-  /**
-   * @return the doubleValues
-   */
-  public double[] getReadableDoubleValues() {
-    return doubleValues;
-  }
-
-  /**
-   * @param doubleValues the doubleValues to set
-   */
-  public void setReadableDoubleValues(double[] doubleValues) {
-    this.doubleValues = doubleValues;
-  }
-
-  /**
-   * @return the byteValues
-   */
-  public byte[][] getReadableByteArrayValues() {
-    return byteValues;
-  }
-
-  /**
-   * @param longValues the longValues to set
-   */
-  public void setReadableLongValues(long[] longValues) {
-    this.longValues = longValues;
-  }
-
-  /**
-   * @param bigDecimalValues the bigDecimalValues to set
-   */
-  public void setReadableBigDecimalValues(BigDecimal[] bigDecimalValues) {
-    this.bigDecimalValues = bigDecimalValues;
-  }
-
-  /**
-   * @param byteValues the byteValues to set
-   */
-  public void setReadableByteValues(byte[][] byteValues) {
-    this.byteValues = byteValues;
-  }
-
-  /**
-   * below method will be used to get the double value by index
-   *
-   * @param index
-   * @return double values
-   */
-  public double getReadableDoubleValueByIndex(int index) {
-    return this.doubleValues[index];
-  }
-
-  public long getReadableLongValueByIndex(int index) {
-    return this.longValues[index];
-  }
-
-  public BigDecimal getReadableBigDecimalValueByIndex(int index) {
-    return this.bigDecimalValues[index];
-  }
-
-  /**
-   * below method will be used to get the readable byte array value by index
-   *
-   * @param index
-   * @return byte array value
-   */
-  public byte[] getReadableByteArrayValueByIndex(int index) {
-    return this.byteValues[index];
-  }
-}
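
One detail worth noting: the getValues implementations in the UnCompressNone* classes
above widen every long (or short) into a double before storing it in this holder, and
doubles cannot represent every long above 2^53 exactly. A tiny demonstration of the
precision loss:

public class PrecisionLoss {
  public static void main(String[] args) {
    long big = (1L << 53) + 1;                 // 9007199254740993
    double widened = big;                      // rounds to the nearest double
    System.out.println(big == (long) widened); // prints false
  }
}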

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/dataholder/CarbonWriteDataHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/dataholder/CarbonWriteDataHolder.java b/core/src/main/java/org/carbondata/core/datastorage/store/dataholder/CarbonWriteDataHolder.java
deleted file mode 100644
index 0cc974c..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/dataholder/CarbonWriteDataHolder.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.dataholder;
-
-public class CarbonWriteDataHolder {
-  /**
-   * doubleValues
-   */
-  private double[] doubleValues;
-
-  /**
-   * longValues
-   */
-  private long[] longValues;
-
-  /**
-   * byteValues
-   */
-  private byte[][] byteValues;
-
-  /**
-   * columnByteValues
-   */
-  private byte[][][] columnByteValues;
-
-  /**
-   * size
-   */
-  private int size;
-
-  /**
-   * totalSize
-   */
-  private int totalSize;
-
-  /**
-   * Method to initialise double array
-   *
-   * @param size
-   */
-  public void initialiseDoubleValues(int size) {
-    if (size < 1) {
-      throw new IllegalArgumentException("Invalid array size");
-    }
-    doubleValues = new double[size];
-  }
-
-  public void reset() {
-    size = 0;
-    totalSize = 0;
-  }
-
-  /**
-   * Method to initialise byte array
-   *
-   * @param size
-   */
-  public void initialiseByteArrayValues(int size) {
-    if (size < 1) {
-      throw new IllegalArgumentException("Invalid array size");
-    }
-
-    byteValues = new byte[size][];
-    columnByteValues = new byte[size][][];
-  }
-
-  /**
-   * Method to initialise long array
-   *
-   * @param size
-   */
-  public void initialiseLongValues(int size) {
-    if (size < 1) {
-      throw new IllegalArgumentException("Invalid array size");
-    }
-    longValues = new long[size];
-  }
-
-  /**
-   * set double value by index
-   *
-   * @param index
-   * @param value
-   */
-  public void setWritableDoubleValueByIndex(int index, Object value) {
-    doubleValues[index] = (Double) value;
-    size++;
-  }
-
-  /**
-   * set long value by index
-   *
-   * @param index
-   * @param value
-   */
-  public void setWritableLongValueByIndex(int index, Object value) {
-    longValues[index] = (Long) value;
-    size++;
-  }
-
-  /**
-   * set byte array value by index
-   *
-   * @param index
-   * @param value
-   */
-  public void setWritableByteArrayValueByIndex(int index, byte[] value) {
-    byteValues[index] = value;
-    size++;
-    if (null != value) totalSize += value.length;
-  }
-
-  /**
-   * set column byte array values by index (columns after the mdkey index)
-   */
-  public void setWritableByteArrayValueByIndex(int index, int mdKeyIndex, Object[] columnData) {
-    int l = 0;
-    columnByteValues[index] = new byte[columnData.length - (mdKeyIndex + 1)][];
-    for (int i = mdKeyIndex + 1; i < columnData.length; i++) {
-      columnByteValues[index][l++] = (byte[]) columnData[i];
-    }
-  }
-
-  /**
-   * Get Writable Double Values
-   */
-  public double[] getWritableDoubleValues() {
-    if (size < doubleValues.length) {
-      double[] temp = new double[size];
-      System.arraycopy(doubleValues, 0, temp, 0, size);
-      doubleValues = temp;
-    }
-    return doubleValues;
-  }
-
-  /**
-   * Get writable byte array values
-   */
-  public byte[] getWritableByteArrayValues() {
-    byte[] temp = new byte[totalSize];
-    int startIndexToCopy = 0;
-    for (int i = 0; i < size; i++) {
-      System.arraycopy(byteValues[i], 0, temp, startIndexToCopy, byteValues[i].length);
-      startIndexToCopy += byteValues[i].length;
-    }
-    return temp;
-  }
-
-  public byte[][] getByteArrayValues() {
-    if (size < byteValues.length) {
-      byte[][] temp = new byte[size][];
-      System.arraycopy(byteValues, 0, temp, 0, size);
-      byteValues = temp;
-    }
-    return byteValues;
-  }
-
-  /**
-   * Get Writable Long Values
-   *
-   * @return the long values written so far
-   */
-  public long[] getWritableLongValues() {
-    if (size < longValues.length) {
-      long[] temp = new long[size];
-      System.arraycopy(longValues, 0, temp, 0, size);
-      longValues = temp;
-    }
-    return longValues;
-  }
-}
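
The getWritable*Values methods above all trim a possibly over-allocated buffer down
to the number of slots actually written, using System.arraycopy. The same pattern in
a standalone sketch:

import java.util.Arrays;

public final class TrimToSize {
  static double[] trim(double[] buffer, int size) {
    if (size >= buffer.length) {
      return buffer;                           // buffer is already full
    }
    double[] trimmed = new double[size];
    System.arraycopy(buffer, 0, trimmed, 0, size);
    return trimmed;
  }

  public static void main(String[] args) {
    double[] buffer = new double[8];
    buffer[0] = 1.5;
    buffer[1] = 2.5;                           // only two slots written
    System.out.println(Arrays.toString(trim(buffer, 2))); // [1.5, 2.5]
  }
}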

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/AbstractDFSCarbonFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/AbstractDFSCarbonFile.java b/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/AbstractDFSCarbonFile.java
deleted file mode 100644
index b04cd47..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/AbstractDFSCarbonFile.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.filesystem;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-import org.carbondata.core.util.CarbonUtil;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-public abstract class AbstractDFSCarbonFile implements CarbonFile {
-  /**
-   * LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(AbstractDFSCarbonFile.class.getName());
-  protected FileStatus fileStatus;
-  protected FileSystem fs;
-
-  public AbstractDFSCarbonFile(String filePath) {
-    filePath = filePath.replace("\\", "/");
-    Path path = new Path(filePath);
-    try {
-      fs = path.getFileSystem(FileFactory.getConfiguration());
-      fileStatus = fs.getFileStatus(path);
-    } catch (IOException e) {
-      LOGGER.error("Exception occured:" + e.getMessage());
-    }
-  }
-
-  public AbstractDFSCarbonFile(Path path) {
-    try {
-      fs = path.getFileSystem(FileFactory.getConfiguration());
-      fileStatus = fs.getFileStatus(path);
-    } catch (IOException e) {
-      LOGGER.error("Exception occured:" + e.getMessage());
-    }
-  }
-
-  public AbstractDFSCarbonFile(FileStatus fileStatus) {
-    this.fileStatus = fileStatus;
-  }
-
-  @Override public boolean createNewFile() {
-    Path path = fileStatus.getPath();
-    try {
-      return fs.createNewFile(path);
-    } catch (IOException e) {
-      return false;
-    }
-  }
-
-  @Override public String getAbsolutePath() {
-    return fileStatus.getPath().toString();
-  }
-
-  @Override public String getName() {
-    return fileStatus.getPath().getName();
-  }
-
-  @Override public boolean isDirectory() {
-    return fileStatus.isDirectory();
-  }
-
-  @Override public boolean exists() {
-    try {
-      if (null != fileStatus) {
-        fs = fileStatus.getPath().getFileSystem(FileFactory.getConfiguration());
-        return fs.exists(fileStatus.getPath());
-      }
-    } catch (IOException e) {
-      LOGGER.error("Exception occured:" + e.getMessage());
-    }
-    return false;
-  }
-
-  @Override public String getCanonicalPath() {
-    return getAbsolutePath();
-  }
-
-  @Override public String getPath() {
-    return getAbsolutePath();
-  }
-
-  @Override public long getSize() {
-    return fileStatus.getLen();
-  }
-
-  public boolean renameTo(String changetoName) {
-    FileSystem fs;
-    try {
-      fs = fileStatus.getPath().getFileSystem(FileFactory.getConfiguration());
-      return fs.rename(fileStatus.getPath(), new Path(changetoName));
-    } catch (IOException e) {
-      LOGGER.error("Exception occured:" + e.getMessage());
-      return false;
-    }
-  }
-
-  public boolean delete() {
-    FileSystem fs;
-    try {
-      fs = fileStatus.getPath().getFileSystem(FileFactory.getConfiguration());
-      return fs.delete(fileStatus.getPath(), true);
-    } catch (IOException e) {
-      LOGGER.error("Exception occured:" + e.getMessage());
-      return false;
-    }
-  }
-
-  @Override public long getLastModifiedTime() {
-    return fileStatus.getModificationTime();
-  }
-
-  @Override public boolean setLastModifiedTime(long timestamp) {
-    try {
-      fs.setTimes(fileStatus.getPath(), timestamp, timestamp);
-    } catch (IOException e) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * This method will delete the data in the file from a given offset
-   */
-  @Override public boolean truncate(String fileName, long validDataEndOffset) {
-    DataOutputStream dataOutputStream = null;
-    DataInputStream dataInputStream = null;
-    boolean fileTruncatedSuccessfully = false;
-    // if the bytes to read are less than 1024 then the buffer size should equal the given offset
-    int bufferSize = validDataEndOffset > CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR ?
-        CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR :
-        (int) validDataEndOffset;
-    // temporary file name
-    String tempWriteFilePath = fileName + CarbonCommonConstants.TEMPWRITEFILEEXTENSION;
-    FileFactory.FileType fileType = FileFactory.getFileType(fileName);
-    try {
-      CarbonFile tempFile = null;
-      // delete temporary file if it already exists at a given path
-      if (FileFactory.isFileExist(tempWriteFilePath, fileType)) {
-        tempFile = FileFactory.getCarbonFile(tempWriteFilePath, fileType);
-        tempFile.delete();
-      }
-      // create new temporary file
-      FileFactory.createNewFile(tempWriteFilePath, fileType);
-      tempFile = FileFactory.getCarbonFile(tempWriteFilePath, fileType);
-      byte[] buff = new byte[bufferSize];
-      dataInputStream = FileFactory.getDataInputStream(fileName, fileType);
-      // read the data
-      int read = dataInputStream.read(buff, 0, buff.length);
-      dataOutputStream = FileFactory.getDataOutputStream(tempWriteFilePath, fileType);
-      dataOutputStream.write(buff, 0, read);
-      long remaining = validDataEndOffset - read;
-      // never read past the offset of the valid data
-      while (remaining > 0) {
-        if (remaining > bufferSize) {
-          buff = new byte[bufferSize];
-        } else {
-          buff = new byte[(int) remaining];
-        }
-        read = dataInputStream.read(buff, 0, buff.length);
-        dataOutputStream.write(buff, 0, read);
-        remaining = remaining - read;
-      }
-      CarbonUtil.closeStreams(dataInputStream, dataOutputStream);
-      // rename the temp file to original file
-      tempFile.renameForce(fileName);
-      fileTruncatedSuccessfully = true;
-    } catch (IOException e) {
-      LOGGER.error("Exception occured while truncating the file " + e.getMessage());
-    } finally {
-      CarbonUtil.closeStreams(dataOutputStream, dataInputStream);
-    }
-    return fileTruncatedSuccessfully;
-  }
-
-  /**
-   * This method will be used to check whether a file has been modified or not
-   *
-   * @param fileTimeStamp time to be compared with latest timestamp of file
-   * @param endOffset     file length to be compared with current length of file
-   * @return true when the file has been modified, false otherwise
-   */
-  @Override public boolean isFileModified(long fileTimeStamp, long endOffset) {
-    boolean isFileModified = false;
-    if (getLastModifiedTime() > fileTimeStamp || getSize() > endOffset) {
-      isFileModified = true;
-    }
-    return isFileModified;
-  }
-}
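
The truncate implementation above works around the absence of an in-place truncate on
older HDFS releases: copy the first validDataEndOffset bytes into a temporary file,
then rename the temporary file over the original. A self-contained sketch of the same
technique against the local filesystem, using java.nio.file instead of the CarbonData
FileFactory API:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public final class TruncateViaTempFile {
  static void truncate(Path file, long validDataEndOffset) throws IOException {
    Path temp = Paths.get(file.toString() + ".tmp");
    try (InputStream in = Files.newInputStream(file);
         OutputStream out = Files.newOutputStream(temp)) {
      byte[] buffer = new byte[1024];
      long remaining = validDataEndOffset;
      while (remaining > 0) {
        int read = in.read(buffer, 0, (int) Math.min(buffer.length, remaining));
        if (read < 0) {
          break;                               // source shorter than the offset
        }
        out.write(buffer, 0, read);
        remaining -= read;
      }
    }
    // replace the original with the truncated copy
    Files.move(temp, file, StandardCopyOption.REPLACE_EXISTING);
  }
}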

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/CarbonFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/CarbonFile.java b/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/CarbonFile.java
deleted file mode 100644
index 8fab676..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/CarbonFile.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.filesystem;
-
-public interface CarbonFile {
-
-  String getAbsolutePath();
-
-  CarbonFile[] listFiles(CarbonFileFilter fileFilter);
-
-  CarbonFile[] listFiles();
-
-  String getName();
-
-  boolean isDirectory();
-
-  boolean exists();
-
-  String getCanonicalPath();
-
-  CarbonFile getParentFile();
-
-  String getPath();
-
-  long getSize();
-
-  boolean renameTo(String changetoName);
-
-  boolean renameForce(String changetoName);
-
-  boolean delete();
-
-  boolean createNewFile();
-
-  long getLastModifiedTime();
-
-  boolean setLastModifiedTime(long timestamp);
-
-  boolean truncate(String fileName, long validDataEndOffset);
-
-  /**
-   * This method will be used to check whether a file has been modified or not
-   *
-   * @param fileTimeStamp time to be compared with latest timestamp of file
-   * @param endOffset     file length to be compared with current length of file
-   * @return true when the file has been modified, false otherwise
-   */
-  boolean isFileModified(long fileTimeStamp, long endOffset);
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/CarbonFileFilter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/CarbonFileFilter.java b/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/CarbonFileFilter.java
deleted file mode 100644
index 4dbfbd3..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/CarbonFileFilter.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.filesystem;
-
-public interface CarbonFileFilter {
-  boolean accept(CarbonFile file);
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/HDFSCarbonFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/HDFSCarbonFile.java b/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/HDFSCarbonFile.java
deleted file mode 100644
index 2df5a82..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/HDFSCarbonFile.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.filesystem;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-
-public class HDFSCarbonFile extends AbstractDFSCarbonFile {
-  /**
-   * LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(HDFSCarbonFile.class.getName());
-
-  public HDFSCarbonFile(String filePath) {
-    super(filePath);
-  }
-
-  public HDFSCarbonFile(Path path) {
-    super(path);
-  }
-
-  public HDFSCarbonFile(FileStatus fileStatus) {
-    super(fileStatus);
-  }
-
-  /**
-   * @param listStatus file statuses to wrap
-   * @return the statuses wrapped as CarbonFile instances
-   */
-  private CarbonFile[] getFiles(FileStatus[] listStatus) {
-    if (listStatus == null) {
-      return new CarbonFile[0];
-    }
-    CarbonFile[] files = new CarbonFile[listStatus.length];
-    for (int i = 0; i < files.length; i++) {
-      files[i] = new HDFSCarbonFile(listStatus[i]);
-    }
-    return files;
-  }
-
-  @Override
-  public CarbonFile[] listFiles() {
-    FileStatus[] listStatus = null;
-    try {
-      if (null != fileStatus && fileStatus.isDirectory()) {
-        Path path = fileStatus.getPath();
-        listStatus = path.getFileSystem(FileFactory.getConfiguration()).listStatus(path);
-      } else {
-        return null;
-      }
-    } catch (IOException e) {
-      LOGGER.error("Exception occured: " + e.getMessage());
-      return new CarbonFile[0];
-    }
-    return getFiles(listStatus);
-  }
-
-  @Override
-  public CarbonFile[] listFiles(final CarbonFileFilter fileFilter) {
-    CarbonFile[] files = listFiles();
-    if (files != null && files.length >= 1) {
-      List<CarbonFile> fileList = new ArrayList<CarbonFile>(files.length);
-      for (int i = 0; i < files.length; i++) {
-        if (fileFilter.accept(files[i])) {
-          fileList.add(files[i]);
-        }
-      }
-      if (fileList.size() >= 1) {
-        return fileList.toArray(new CarbonFile[fileList.size()]);
-      } else {
-        return new CarbonFile[0];
-      }
-    }
-    return files;
-  }
-
-  @Override
-  public CarbonFile getParentFile() {
-    Path parent = fileStatus.getPath().getParent();
-    return null == parent ? null : new HDFSCarbonFile(parent);
-  }
-
-  @Override
-  public boolean renameForce(String changetoName) {
-    FileSystem fs;
-    try {
-      fs = fileStatus.getPath().getFileSystem(FileFactory.getConfiguration());
-      if (fs instanceof DistributedFileSystem) {
-        ((DistributedFileSystem) fs).rename(fileStatus.getPath(), new Path(changetoName),
-            org.apache.hadoop.fs.Options.Rename.OVERWRITE);
-        return true;
-      } else {
-        return false;
-      }
-    } catch (IOException e) {
-      LOGGER.error("Exception occured: " + e.getMessage());
-      return false;
-    }
-  }
-}
\ No newline at end of file
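
renameForce above relies on the HDFS-specific Options.Rename.OVERWRITE to replace an
existing destination in a single step. On a local filesystem the closest analogue is
java.nio.file.Files.move with REPLACE_EXISTING (atomicity then depends on the
underlying filesystem); a standalone sketch:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public final class ForceRename {
  public static void main(String[] args) throws IOException {
    Path src = Files.write(Paths.get("part.tmp"), "new".getBytes());
    Path dst = Files.write(Paths.get("part.dat"), "old".getBytes());
    // replace dst even though it already exists
    Files.move(src, dst, StandardCopyOption.REPLACE_EXISTING);
    System.out.println(new String(Files.readAllBytes(dst))); // prints "new"
  }
}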

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/LocalCarbonFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/LocalCarbonFile.java b/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/LocalCarbonFile.java
deleted file mode 100644
index 55f20de..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/LocalCarbonFile.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.filesystem;
-
-import java.io.File;
-import java.io.FileFilter;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.nio.channels.FileChannel;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-import org.carbondata.core.util.CarbonUtil;
-
-import org.apache.hadoop.fs.Path;
-
-public class LocalCarbonFile implements CarbonFile {
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(LocalCarbonFile.class.getName());
-  private File file;
-
-  public LocalCarbonFile(String filePath) {
-    Path pathWithoutSchemeAndAuthority = Path.getPathWithoutSchemeAndAuthority(new Path(filePath));
-    file = new File(pathWithoutSchemeAndAuthority.toString());
-  }
-
-  public LocalCarbonFile(File file) {
-    this.file = file;
-  }
-
-  @Override public String getAbsolutePath() {
-    return file.getAbsolutePath();
-  }
-
-  @Override public CarbonFile[] listFiles(final CarbonFileFilter fileFilter) {
-    if (!file.isDirectory()) {
-      return null;
-    }
-
-    File[] files = file.listFiles(new FileFilter() {
-
-      @Override public boolean accept(File pathname) {
-        return fileFilter.accept(new LocalCarbonFile(pathname));
-      }
-    });
-
-    if (files == null) {
-      return new CarbonFile[0];
-    }
-
-    CarbonFile[] carbonFiles = new CarbonFile[files.length];
-
-    for (int i = 0; i < carbonFiles.length; i++) {
-      carbonFiles[i] = new LocalCarbonFile(files[i]);
-    }
-
-    return carbonFiles;
-  }
-
-  @Override public String getName() {
-    return file.getName();
-  }
-
-  @Override public boolean isDirectory() {
-    return file.isDirectory();
-  }
-
-  @Override public boolean exists() {
-    return file.exists();
-  }
-
-  @Override public String getCanonicalPath() {
-    try {
-      return file.getCanonicalPath();
-    } catch (IOException e) {
-      LOGGER.error(e, "Exception occurred: " + e.getMessage());
-    }
-    return null;
-  }
-
-  @Override public CarbonFile getParentFile() {
-    return new LocalCarbonFile(file.getParentFile());
-  }
-
-  @Override public String getPath() {
-    return file.getPath();
-  }
-
-  @Override public long getSize() {
-    return file.length();
-  }
-
-  public boolean renameTo(String changetoName) {
-    return file.renameTo(new File(changetoName));
-  }
-
-  public boolean delete() {
-    return file.delete();
-  }
-
-  @Override public CarbonFile[] listFiles() {
-
-    if (!file.isDirectory()) {
-      return null;
-    }
-    File[] files = file.listFiles();
-    if (files == null) {
-      return new CarbonFile[0];
-    }
-    CarbonFile[] carbonFiles = new CarbonFile[files.length];
-    for (int i = 0; i < carbonFiles.length; i++) {
-      carbonFiles[i] = new LocalCarbonFile(files[i]);
-    }
-
-    return carbonFiles;
-
-  }
-
-  @Override public boolean createNewFile() {
-    try {
-      return file.createNewFile();
-    } catch (IOException e) {
-      return false;
-    }
-  }
-
-  @Override public long getLastModifiedTime() {
-    return file.lastModified();
-  }
-
-  @Override public boolean setLastModifiedTime(long timestamp) {
-    return file.setLastModified(timestamp);
-  }
-
-  /**
-   * This method will delete the data in the file from a given offset
-   */
-  @Override public boolean truncate(String fileName, long validDataEndOffset) {
-    FileChannel source = null;
-    FileChannel destination = null;
-    boolean fileTruncatedSuccessfully = false;
-    // temporary file name
-    String tempWriteFilePath = fileName + CarbonCommonConstants.TEMPWRITEFILEEXTENSION;
-    FileFactory.FileType fileType = FileFactory.getFileType(fileName);
-    try {
-      CarbonFile tempFile = null;
-      // delete temporary file if it already exists at a given path
-      if (FileFactory.isFileExist(tempWriteFilePath, fileType)) {
-        tempFile = FileFactory.getCarbonFile(tempWriteFilePath, fileType);
-        tempFile.delete();
-      }
-      // create new temporary file
-      FileFactory.createNewFile(tempWriteFilePath, fileType);
-      tempFile = FileFactory.getCarbonFile(tempWriteFilePath, fileType);
-      source = new FileInputStream(fileName).getChannel();
-      destination = new FileOutputStream(tempWriteFilePath).getChannel();
-      long read = destination.transferFrom(source, 0, validDataEndOffset);
-      long totalBytesRead = read;
-      long remaining = validDataEndOffset - totalBytesRead;
-      // keep copying until the required data offset is reached
-      while (remaining > 0) {
-        read = destination.transferFrom(source, totalBytesRead, remaining);
-        totalBytesRead = totalBytesRead + read;
-        remaining = validDataEndOffset - totalBytesRead;
-      }
-      CarbonUtil.closeStreams(source, destination);
-      // rename the temp file to original file
-      tempFile.renameForce(fileName);
-      fileTruncatedSuccessfully = true;
-    } catch (IOException e) {
-      LOGGER.error("Exception occured while truncating the file " + e.getMessage());
-    } finally {
-      CarbonUtil.closeStreams(source, destination);
-    }
-    return fileTruncatedSuccessfully;
-  }
-
-  /**
-   * This method will be used to check whether a file has been modified or not
-   *
-   * @param fileTimeStamp time to be compared with latest timestamp of file
-   * @param endOffset     file length to be compared with current length of file
-   * @return true when the file has been modified, false otherwise
-   */
-  @Override public boolean isFileModified(long fileTimeStamp, long endOffset) {
-    boolean isFileModified = false;
-    if (getLastModifiedTime() > fileTimeStamp || getSize() > endOffset) {
-      isFileModified = true;
-    }
-    return isFileModified;
-  }
-
-  @Override public boolean renameForce(String changetoName) {
-    File destFile = new File(changetoName);
-    if (destFile.exists()) {
-      // best-effort delete so the rename can replace an existing destination
-      destFile.delete();
-    }
-    return file.renameTo(destFile);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/ViewFSCarbonFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/ViewFSCarbonFile.java b/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/ViewFSCarbonFile.java
deleted file mode 100644
index 43c2ef9..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/filesystem/ViewFSCarbonFile.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.datastorage.store.filesystem;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.viewfs.ViewFileSystem;
-
-public class ViewFSCarbonFile extends AbstractDFSCarbonFile {
-  /**
-   * LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(ViewFSCarbonFile.class.getName());
-
-  public ViewFSCarbonFile(String filePath) {
-    super(filePath);
-  }
-
-  public ViewFSCarbonFile(Path path) {
-    super(path);
-  }
-
-  public ViewFSCarbonFile(FileStatus fileStatus) {
-    super(fileStatus);
-  }
-
-  /**
-   * @param listStatus file statuses to wrap
-   * @return the statuses wrapped as CarbonFile instances
-   */
-  private CarbonFile[] getFiles(FileStatus[] listStatus) {
-    if (listStatus == null) {
-      return new CarbonFile[0];
-    }
-    CarbonFile[] files = new CarbonFile[listStatus.length];
-    for (int i = 0; i < files.length; i++) {
-      files[i] = new ViewFSCarbonFile(listStatus[i]);
-    }
-    return files;
-  }
-
-  @Override
-  public CarbonFile[] listFiles() {
-    FileStatus[] listStatus = null;
-    try {
-      if (null != fileStatus && fileStatus.isDirectory()) {
-        Path path = fileStatus.getPath();
-        listStatus = path.getFileSystem(FileFactory.getConfiguration()).listStatus(path);
-      } else {
-        return null;
-      }
-    } catch (IOException ex) {
-      LOGGER.error("Exception occured" + ex.getMessage());
-      return new CarbonFile[0];
-    }
-    return getFiles(listStatus);
-  }
-
-  @Override
-  public CarbonFile[] listFiles(final CarbonFileFilter fileFilter) {
-    CarbonFile[] files = listFiles();
-    if (files != null && files.length >= 1) {
-      List<CarbonFile> fileList = new ArrayList<CarbonFile>(files.length);
-      for (int i = 0; i < files.length; i++) {
-        if (fileFilter.accept(files[i])) {
-          fileList.add(files[i]);
-        }
-      }
-      if (fileList.size() >= 1) {
-        return fileList.toArray(new CarbonFile[fileList.size()]);
-      } else {
-        return new CarbonFile[0];
-      }
-    }
-    return files;
-  }
-
-  @Override public CarbonFile getParentFile() {
-    Path parent = fileStatus.getPath().getParent();
-    return null == parent ? null : new ViewFSCarbonFile(parent);
-  }
-
-  @Override
-  public boolean renameForce(String changetoName) {
-    FileSystem fs;
-    try {
-      fs = fileStatus.getPath().getFileSystem(FileFactory.getConfiguration());
-      if (fs instanceof ViewFileSystem) {
-        fs.delete(new Path(changetoName), true);
-        fs.rename(fileStatus.getPath(), new Path(changetoName));
-        return true;
-      } else {
-        return false;
-      }
-    } catch (IOException e) {
-      LOGGER.error("Exception occured" + e.getMessage());
-      return false;
-    }
-  }
-}
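
As a usage note: a minimal sketch of how the listFiles(CarbonFileFilter) hook above can be driven. The viewfs path and the ".carbonindex" suffix are illustrative assumptions, and CarbonFile's getName()/getAbsolutePath() accessors are assumed from the interface in this same package:

  import org.carbondata.core.datastorage.store.filesystem.CarbonFile;
  import org.carbondata.core.datastorage.store.filesystem.CarbonFileFilter;
  import org.carbondata.core.datastorage.store.filesystem.ViewFSCarbonFile;

  public class ListIndexFiles {
    public static void main(String[] args) {
      // hypothetical ViewFS store location
      CarbonFile dir = new ViewFSCarbonFile("viewfs://cluster/store/default/t1");
      // listFiles(filter) returns only the entries the filter accepts
      CarbonFile[] indexFiles = dir.listFiles(new CarbonFileFilter() {
        @Override public boolean accept(CarbonFile file) {
          return file.getName().endsWith(".carbonindex"); // illustrative suffix
        }
      });
      for (CarbonFile f : indexFiles) {
        System.out.println(f.getAbsolutePath());
      }
    }
  }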

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/CompressedDataMeasureDataWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/CompressedDataMeasureDataWrapper.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/CompressedDataMeasureDataWrapper.java
deleted file mode 100644
index ed4ec11..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/CompressedDataMeasureDataWrapper.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl;
-
-import org.carbondata.core.datastorage.store.MeasureDataWrapper;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-
-public class CompressedDataMeasureDataWrapper implements MeasureDataWrapper {
-
-  private final CarbonReadDataHolder[] values;
-
-  public CompressedDataMeasureDataWrapper(final CarbonReadDataHolder[] values) {
-    this.values = values;
-  }
-
-  @Override public CarbonReadDataHolder[] getValues() {
-    return values;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/DFSFileHolderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/DFSFileHolderImpl.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/DFSFileHolderImpl.java
deleted file mode 100644
index 2ffdb5a..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/DFSFileHolderImpl.java
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.datastorage.store.impl;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.FileHolder;
-
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-
-public class DFSFileHolderImpl implements FileHolder {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(DFSFileHolderImpl.class.getName());
-  /**
-   * cache to hold filename and its stream
-   */
-  private Map<String, FSDataInputStream> fileNameAndStreamCache;
-
-  public DFSFileHolderImpl() {
-    this.fileNameAndStreamCache =
-        new HashMap<String, FSDataInputStream>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-  }
-
-  @Override public byte[] readByteArray(String filePath, long offset, int length) {
-    FSDataInputStream fileChannel = updateCache(filePath);
-    byte[] byteBuffer = read(fileChannel, length, offset);
-    return byteBuffer;
-  }
-
-  /**
-   * This method checks whether a stream for the given file path is already
-   * present in the cache; if not, it opens one and adds it to the
-   * cache, otherwise it returns the cached stream
-   *
-   * @param filePath fully qualified file path
-   * @return channel
-   */
-  private FSDataInputStream updateCache(String filePath) {
-    FSDataInputStream fileChannel = fileNameAndStreamCache.get(filePath);
-    try {
-      if (null == fileChannel) {
-        Path pt = new Path(filePath);
-        FileSystem fs = FileSystem.get(FileFactory.getConfiguration());
-        fileChannel = fs.open(pt);
-        fileNameAndStreamCache.put(filePath, fileChannel);
-      }
-    } catch (IOException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return fileChannel;
-  }
-
-  /**
-   * This method will be used to read from file based on number of bytes to be read and position
-   *
-   * @param channel file channel
-   * @param size    number of bytes
-   * @param offset  position
-   * @return byte buffer
-   */
-  private byte[] read(FSDataInputStream channel, int size, long offset) {
-    byte[] byteBuffer = new byte[size];
-    try {
-      channel.seek(offset);
-      channel.readFully(byteBuffer);
-    } catch (Exception e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return byteBuffer;
-  }
-
-  /**
-   * This method will be used to read from file based on number of bytes to be read
-   *
-   * @param channel file channel
-   * @param size    number of bytes
-   * @return byte buffer
-   */
-  private byte[] read(FSDataInputStream channel, int size) {
-    byte[] byteBuffer = new byte[size];
-    try {
-      channel.readFully(byteBuffer);
-    } catch (Exception e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return byteBuffer;
-  }
-
-  @Override public int readInt(String filePath, long offset) {
-    FSDataInputStream fileChannel = updateCache(filePath);
-    int i = -1;
-    try {
-      fileChannel.seek(offset);
-      i = fileChannel.readInt();
-    } catch (IOException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-
-    return i;
-  }
-
-  @Override public long readDouble(String filePath, long offset) {
-    FSDataInputStream fileChannel = updateCache(filePath);
-    long i = -1;
-    try {
-      fileChannel.seek(offset);
-      i = fileChannel.readLong();
-    } catch (IOException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-
-    return i;
-  }
-
-  @Override public void finish() {
-    for (Entry<String, FSDataInputStream> entry : fileNameAndStreamCache.entrySet()) {
-      try {
-        FSDataInputStream channel = entry.getValue();
-        if (null != channel) {
-          channel.close();
-        }
-      } catch (IOException exception) {
-        LOGGER.error(exception, exception.getMessage());
-      }
-    }
-
-  }
-
-  @Override public byte[] readByteArray(String filePath, int length) {
-    FSDataInputStream fileChannel = updateCache(filePath);
-    byte[] byteBuffer = read(fileChannel, length);
-    return byteBuffer;
-  }
-
-  @Override public long readLong(String filePath, long offset) {
-    FSDataInputStream fileChannel = updateCache(filePath);
-    long i = -1;
-    try {
-      fileChannel.seek(offset);
-      i = fileChannel.readLong();
-    } catch (IOException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return i;
-  }
-
-  @Override public int readInt(String filePath) {
-    FSDataInputStream fileChannel = updateCache(filePath);
-    int i = -1;
-    try {
-      i = fileChannel.readInt();
-    } catch (IOException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return i;
-  }
-}
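
A minimal usage sketch of the stream-caching holder above; the path and offsets are hypothetical and not CarbonData's real file layout. Every positional read for the same path reuses the cached FSDataInputStream until finish() closes it:

  import org.carbondata.core.datastorage.store.FileHolder;
  import org.carbondata.core.datastorage.store.impl.FileFactory;

  public class FooterRead {
    public static void main(String[] args) {
      String file = "hdfs://nn:8020/store/part-0.carbondata"; // hypothetical path
      FileHolder holder = FileFactory.getFileHolder(FileFactory.getFileType(file));
      try {
        long footerOffset = holder.readLong(file, 0L);        // hypothetical layout
        byte[] footer = holder.readByteArray(file, footerOffset, 64);
        System.out.println("read " + footer.length + " footer bytes");
      } finally {
        holder.finish(); // closes every cached stream in one pass
      }
    }
  }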

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/FileFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/FileFactory.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/FileFactory.java
deleted file mode 100644
index 8005102..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/FileFactory.java
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.filesystem.CarbonFile;
-import org.carbondata.core.datastorage.store.filesystem.HDFSCarbonFile;
-import org.carbondata.core.datastorage.store.filesystem.LocalCarbonFile;
-import org.carbondata.core.datastorage.store.filesystem.ViewFSCarbonFile;
-import org.carbondata.core.util.CarbonUtil;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-public final class FileFactory {
-  private static Configuration configuration = null;
-
-  private static FileType storeDefaultFileType = FileType.LOCAL;
-
-  static {
-    String property = CarbonUtil.getCarbonStorePath(null, null);
-    if (property != null) {
-      if (property.startsWith(CarbonUtil.HDFS_PREFIX)) {
-        storeDefaultFileType = FileType.HDFS;
-      } else if (property.startsWith(CarbonUtil.VIEWFS_PREFIX)) {
-        storeDefaultFileType = FileType.VIEWFS;
-      }
-    }
-
-    configuration = new Configuration();
-    configuration.addResource(new Path("../core-default.xml"));
-  }
-
-  private FileFactory() {
-
-  }
-
-  public static Configuration getConfiguration() {
-    return configuration;
-  }
-
-  public static FileHolder getFileHolder(FileType fileType) {
-    switch (fileType) {
-      case LOCAL:
-        return new FileHolderImpl();
-      case HDFS:
-      case VIEWFS:
-        return new DFSFileHolderImpl();
-      default:
-        return new FileHolderImpl();
-    }
-  }
-
-  public static FileType getFileType() {
-    String property = CarbonUtil.getCarbonStorePath(null, null);
-    if (property != null) {
-      if (property.startsWith(CarbonUtil.HDFS_PREFIX)) {
-        storeDefaultFileType = FileType.HDFS;
-      } else if (property.startsWith(CarbonUtil.VIEWFS_PREFIX)) {
-        storeDefaultFileType = FileType.VIEWFS;
-      }
-    }
-    return storeDefaultFileType;
-  }
-
-  public static FileType getFileType(String path) {
-    if (path.startsWith(CarbonUtil.HDFS_PREFIX)) {
-      return FileType.HDFS;
-    } else if (path.startsWith(CarbonUtil.VIEWFS_PREFIX)) {
-      return FileType.VIEWFS;
-    }
-    return FileType.LOCAL;
-  }
-
-  public static CarbonFile getCarbonFile(String path, FileType fileType) {
-    switch (fileType) {
-      case LOCAL:
-        return new LocalCarbonFile(path);
-      case HDFS:
-        return new HDFSCarbonFile(path);
-      case VIEWFS:
-        return new ViewFSCarbonFile(path);
-      default:
-        return new LocalCarbonFile(path);
-    }
-  }
-
-  public static DataInputStream getDataInputStream(String path, FileType fileType)
-      throws IOException {
-    path = path.replace("\\", "/");
-    switch (fileType) {
-      case LOCAL:
-        return new DataInputStream(new BufferedInputStream(new FileInputStream(path)));
-      case HDFS:
-      case VIEWFS:
-        Path pt = new Path(path);
-        FileSystem fs = FileSystem.get(configuration);
-        FSDataInputStream stream = fs.open(pt);
-        return new DataInputStream(new BufferedInputStream(stream));
-      default:
-        return new DataInputStream(new BufferedInputStream(new FileInputStream(path)));
-    }
-  }
-
-  public static DataInputStream getDataInputStream(String path, FileType fileType, int bufferSize)
-      throws IOException {
-    path = path.replace("\\", "/");
-    switch (fileType) {
-      case LOCAL:
-        return new DataInputStream(new BufferedInputStream(new FileInputStream(path)));
-      case HDFS:
-      case VIEWFS:
-        Path pt = new Path(path);
-        FileSystem fs = FileSystem.get(configuration);
-        FSDataInputStream stream = fs.open(pt, bufferSize);
-        return new DataInputStream(new BufferedInputStream(stream));
-      default:
-        return new DataInputStream(new BufferedInputStream(new FileInputStream(path)));
-    }
-  }
-
-  /**
-   * returns a DataInputStream that has been seeked to the given offset of the file
-   *
-   * @param path
-   * @param fileType
-   * @param bufferSize
-   * @param offset
-   * @return DataInputStream
-   * @throws IOException
-   */
-  public static DataInputStream getDataInputStream(String path, FileType fileType, int bufferSize,
-      long offset) throws IOException {
-    path = path.replace("\\", "/");
-    switch (fileType) {
-      case HDFS:
-      case VIEWFS:
-        Path pt = new Path(path);
-        FileSystem fs = FileSystem.get(configuration);
-        FSDataInputStream stream = fs.open(pt, bufferSize);
-        stream.seek(offset);
-        return new DataInputStream(new BufferedInputStream(stream));
-      default:
-        FileInputStream fis = new FileInputStream(path);
-        long actualSkipSize = 0;
-        long skipSize = offset;
-        while (actualSkipSize != offset) {
-          actualSkipSize += fis.skip(skipSize);
-          skipSize = offset - actualSkipSize;
-        }
-        return new DataInputStream(new BufferedInputStream(fis));
-    }
-  }
-
-  public static DataOutputStream getDataOutputStream(String path, FileType fileType)
-      throws IOException {
-    path = path.replace("\\", "/");
-    switch (fileType) {
-      case LOCAL:
-        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
-      case HDFS:
-      case VIEWFS:
-        Path pt = new Path(path);
-        FileSystem fs = pt.getFileSystem(configuration);
-        FSDataOutputStream stream = fs.create(pt, true);
-        return stream;
-      default:
-        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
-    }
-  }
-
-  public static DataOutputStream getDataOutputStream(String path, FileType fileType,
-      short replicationFactor) throws IOException {
-    path = path.replace("\\", "/");
-    switch (fileType) {
-      case LOCAL:
-        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
-      case HDFS:
-      case VIEWFS:
-        Path pt = new Path(path);
-        FileSystem fs = pt.getFileSystem(configuration);
-        FSDataOutputStream stream = fs.create(pt, replicationFactor);
-        return stream;
-      default:
-        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
-    }
-  }
-
-  public static DataOutputStream getDataOutputStream(String path, FileType fileType, int bufferSize)
-      throws IOException {
-    path = path.replace("\\", "/");
-    switch (fileType) {
-      case LOCAL:
-        return new DataOutputStream(
-            new BufferedOutputStream(new FileOutputStream(path), bufferSize));
-      case HDFS:
-      case VIEWFS:
-        Path pt = new Path(path);
-        FileSystem fs = pt.getFileSystem(configuration);
-        FSDataOutputStream stream = fs.create(pt, true, bufferSize);
-        return stream;
-      default:
-        return new DataOutputStream(
-            new BufferedOutputStream(new FileOutputStream(path), bufferSize));
-    }
-  }
-
-  public static DataOutputStream getDataOutputStream(String path, FileType fileType, int bufferSize,
-      boolean append) throws IOException {
-    path = path.replace("\\", "/");
-    switch (fileType) {
-      case LOCAL:
-        return new DataOutputStream(
-            new BufferedOutputStream(new FileOutputStream(path, append), bufferSize));
-      case HDFS:
-      case VIEWFS:
-        Path pt = new Path(path);
-        FileSystem fs = pt.getFileSystem(configuration);
-        FSDataOutputStream stream = null;
-        if (append) {
-          // append to a file only if file already exists else file not found
-          // exception will be thrown by hdfs
-          if (CarbonUtil.isFileExists(path)) {
-            stream = fs.append(pt, bufferSize);
-          } else {
-            stream = fs.create(pt, true, bufferSize);
-          }
-        } else {
-          stream = fs.create(pt, true, bufferSize);
-        }
-        return stream;
-      default:
-        return new DataOutputStream(
-            new BufferedOutputStream(new FileOutputStream(path), bufferSize));
-    }
-  }
-
-  public static DataOutputStream getDataOutputStream(String path, FileType fileType, int bufferSize,
-      long blockSize) throws IOException {
-    path = path.replace("\\", "/");
-    switch (fileType) {
-      case LOCAL:
-        return new DataOutputStream(
-            new BufferedOutputStream(new FileOutputStream(path), bufferSize));
-      case HDFS:
-      case VIEWFS:
-        Path pt = new Path(path);
-        FileSystem fs = pt.getFileSystem(configuration);
-        FSDataOutputStream stream =
-            fs.create(pt, true, bufferSize, fs.getDefaultReplication(pt), blockSize);
-        return stream;
-      default:
-        return new DataOutputStream(
-            new BufferedOutputStream(new FileOutputStream(path), bufferSize));
-    }
-  }
-
-  /**
-   * This method checks whether the given path exists and, if performFileCheck
-   * is true, also verifies that it is a regular file
-   *
-   * @param filePath         - Path
-   * @param fileType         - FileType Local/HDFS/ViewFS
-   * @param performFileCheck - Provide false for folders, true for files
-   */
-  public static boolean isFileExist(String filePath, FileType fileType, boolean performFileCheck)
-      throws IOException {
-    filePath = filePath.replace("\\", "/");
-    switch (fileType) {
-      case HDFS:
-      case VIEWFS:
-        Path path = new Path(filePath);
-        FileSystem fs = path.getFileSystem(configuration);
-        if (performFileCheck) {
-          return fs.exists(path) && fs.isFile(path);
-        } else {
-          return fs.exists(path);
-        }
-
-      case LOCAL:
-      default:
-        File defaultFile = new File(filePath);
-
-        if (performFileCheck) {
-          return defaultFile.exists() && defaultFile.isFile();
-        } else {
-          return defaultFile.exists();
-        }
-    }
-  }
-
-  /**
-   * This method checks whether the given path exists, without
-   * distinguishing between files and folders
-   *
-   * @param filePath - Path
-   * @param fileType - FileType Local/HDFS/ViewFS
-   */
-  public static boolean isFileExist(String filePath, FileType fileType) throws IOException {
-    filePath = filePath.replace("\\", "/");
-    switch (fileType) {
-      case HDFS:
-      case VIEWFS:
-        Path path = new Path(filePath);
-        FileSystem fs = path.getFileSystem(configuration);
-        return fs.exists(path);
-
-      case LOCAL:
-      default:
-        File defaultFile = new File(filePath);
-        return defaultFile.exists();
-    }
-  }
-
-  public static boolean createNewFile(String filePath, FileType fileType) throws IOException {
-    filePath = filePath.replace("\\", "/");
-    switch (fileType) {
-      case HDFS:
-      case VIEWFS:
-        Path path = new Path(filePath);
-        FileSystem fs = path.getFileSystem(configuration);
-        return fs.createNewFile(path);
-
-      case LOCAL:
-      default:
-        File file = new File(filePath);
-        return file.createNewFile();
-    }
-  }
-
-  public static boolean mkdirs(String filePath, FileType fileType) throws IOException {
-    filePath = filePath.replace("\\", "/");
-    switch (fileType) {
-      case HDFS:
-      case VIEWFS:
-        Path path = new Path(filePath);
-        FileSystem fs = path.getFileSystem(configuration);
-        return fs.mkdirs(path);
-      case LOCAL:
-      default:
-        File file = new File(filePath);
-        return file.mkdirs();
-    }
-  }
-
-  /**
-   * for getting the data output stream using the file system append API.
-   *
-   * @param path
-   * @param fileType
-   * @return
-   * @throws IOException
-   */
-  public static DataOutputStream getDataOutputStreamUsingAppend(String path, FileType fileType)
-      throws IOException {
-    path = path.replace("\\", "/");
-    switch (fileType) {
-      case LOCAL:
-        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path, true)));
-      case HDFS:
-      case VIEWFS:
-        Path pt = new Path(path);
-        FileSystem fs = pt.getFileSystem(configuration);
-        FSDataOutputStream stream = fs.append(pt);
-        return stream;
-      default:
-        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
-    }
-  }
-
-  /**
-   * Creates a new lock file; on DFS the file is registered for deletion on
-   * exit, so an abrupt shutdown does not leave a stale lock behind.
-   *
-   * @param filePath
-   * @param fileType
-   * @return
-   * @throws IOException
-   */
-  public static boolean createNewLockFile(String filePath, FileType fileType) throws IOException {
-    filePath = filePath.replace("\\", "/");
-    switch (fileType) {
-      case HDFS:
-      case VIEWFS:
-        Path path = new Path(filePath);
-        FileSystem fs = path.getFileSystem(configuration);
-        if (fs.createNewFile(path)) {
-          fs.deleteOnExit(path);
-          return true;
-        }
-        return false;
-      case LOCAL:
-      default:
-        File file = new File(filePath);
-        return file.createNewFile();
-    }
-  }
-
-  public enum FileType {
-    LOCAL, HDFS, VIEWFS
-  }
-
-  /**
-   * below method will be used to update the file path:
-   * for the local file type it strips the scheme and authority
-   * (for example the file:/ prefix) from the path
-   *
-   * @param filePath
-   * @return updated file path without url for local
-   */
-  public static String getUpdatedFilePath(String filePath) {
-    FileType fileType = getFileType(filePath);
-    switch (fileType) {
-      case HDFS:
-      case VIEWFS:
-        return filePath;
-      case LOCAL:
-      default:
-        Path pathWithoutSchemeAndAuthority =
-            Path.getPathWithoutSchemeAndAuthority(new Path(filePath));
-        return pathWithoutSchemeAndAuthority.toString();
-    }
-  }
-
-  /**
-   * It computes size of directory
-   *
-   * @param filePath
-   * @return size in bytes
-   * @throws IOException
-   */
-  public static long getDirectorySize(String filePath) throws IOException {
-    FileType fileType = getFileType(filePath);
-    switch (fileType) {
-      case HDFS:
-      case VIEWFS:
-        Path path = new Path(filePath);
-        FileSystem fs = path.getFileSystem(configuration);
-        return fs.getContentSummary(path).getLength();
-      case LOCAL:
-      default:
-        File file = new File(filePath);
-        return FileUtils.sizeOfDirectory(file);
-    }
-  }
-
-}
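
A short sketch of the scheme dispatch implemented above: the caller never touches FileSystem directly, FileFactory.getFileType picks LOCAL/HDFS/VIEWFS from the path prefix, and every helper switches on it. The sample path is an assumption:

  import java.io.DataInputStream;
  import java.io.IOException;

  import org.carbondata.core.datastorage.store.impl.FileFactory;
  import org.carbondata.core.datastorage.store.impl.FileFactory.FileType;

  public class OpenAnyScheme {
    public static void main(String[] args) throws IOException {
      String path = args.length > 0 ? args[0] : "/tmp/sample.bin"; // any scheme works
      FileType type = FileFactory.getFileType(path);
      if (!FileFactory.isFileExist(path, type, true)) {
        System.out.println(path + " is missing or not a regular file");
        return;
      }
      DataInputStream in = FileFactory.getDataInputStream(path, type, 4096);
      try {
        System.out.println("first int: " + in.readInt());
      } finally {
        in.close();
      }
    }
  }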

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/FileHolderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/FileHolderImpl.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/FileHolderImpl.java
deleted file mode 100644
index 1801498..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/FileHolderImpl.java
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl;
-
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.FileHolder;
-
-public class FileHolderImpl implements FileHolder {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(FileHolderImpl.class.getName());
-  /**
-   * cache to hold filename and its stream
-   */
-  private Map<String, FileChannel> fileNameAndStreamCache;
-
-  /**
-   * FileHolderImpl Constructor
-   * It will create the cache
-   */
-  public FileHolderImpl() {
-    this.fileNameAndStreamCache =
-        new HashMap<String, FileChannel>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-  }
-
-  public FileHolderImpl(int capacity) {
-    this.fileNameAndStreamCache = new HashMap<String, FileChannel>(capacity);
-  }
-
-  /**
-   * This method will be used to read the byte array from file based on the offset
-   * and length (number of bytes) to be read
-   *
-   * @param filePath fully qualified file path
-   * @param offset   reading start position,
-   * @param length   number of bytes to be read
-   * @return read byte array
-   */
-  @Override public byte[] readByteArray(String filePath, long offset, int length) {
-    FileChannel fileChannel = updateCache(filePath);
-    ByteBuffer byteBuffer = read(fileChannel, length, offset);
-    return byteBuffer.array();
-  }
-
-  /**
-   * This method will be used to close all the streams currently present in the cache
-   */
-  @Override public void finish() {
-
-    for (Entry<String, FileChannel> entry : fileNameAndStreamCache.entrySet()) {
-      try {
-        FileChannel channel = entry.getValue();
-        if (null != channel) {
-          channel.close();
-        }
-      } catch (IOException exception) {
-        LOGGER.error(exception, exception.getMessage());
-      }
-    }
-
-  }
-
-  /**
-   * This method will be used to read an int from file from position (offset); the
-   * length is always 4 because an int is 4 bytes
-   *
-   * @param filePath fully qualified file path
-   * @param offset   reading start position,
-   * @return read int
-   */
-  @Override public int readInt(String filePath, long offset) {
-    FileChannel fileChannel = updateCache(filePath);
-    ByteBuffer byteBuffer = read(fileChannel, CarbonCommonConstants.INT_SIZE_IN_BYTE, offset);
-    return byteBuffer.getInt();
-  }
-
-  /**
-   * This method will be used to read an int from the file's current position; the
-   * length is always 4 because an int is 4 bytes
-   *
-   * @param filePath fully qualified file path
-   * @return read int
-   */
-  @Override public int readInt(String filePath) {
-    FileChannel fileChannel = updateCache(filePath);
-    ByteBuffer byteBuffer = read(fileChannel, CarbonCommonConstants.INT_SIZE_IN_BYTE);
-    return byteBuffer.getInt();
-  }
-
-  /**
-   * This method will be used to read a long from file from position (offset); the
-   * length is always 8 because a long is 8 bytes
-   *
-   * @param filePath fully qualified file path
-   * @param offset   reading start position
-   * @return read long
-   */
-  @Override public long readDouble(String filePath, long offset) {
-    FileChannel fileChannel = updateCache(filePath);
-    ByteBuffer byteBuffer = read(fileChannel, CarbonCommonConstants.LONG_SIZE_IN_BYTE, offset);
-    return byteBuffer.getLong();
-  }
-
-  /**
-   * This method checks whether a stream for the given file path is already
-   * present in the cache; if not, it opens one and adds it to the
-   * cache, otherwise it returns the cached stream
-   *
-   * @param filePath fully qualified file path
-   * @return channel
-   */
-  private FileChannel updateCache(String filePath) {
-    FileChannel fileChannel = fileNameAndStreamCache.get(filePath);
-    try {
-      if (null == fileChannel) {
-        FileInputStream stream = new FileInputStream(filePath);
-        fileChannel = stream.getChannel();
-        fileNameAndStreamCache.put(filePath, fileChannel);
-      }
-    } catch (IOException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return fileChannel;
-  }
-
-  /**
-   * This method will be used to read from file based on number of bytes to be read and position
-   *
-   * @param channel file channel
-   * @param size    number of bytes
-   * @param offset  position
-   * @return byte buffer
-   */
-  private ByteBuffer read(FileChannel channel, int size, long offset) {
-    ByteBuffer byteBuffer = ByteBuffer.allocate(size);
-    try {
-      channel.position(offset);
-      channel.read(byteBuffer);
-    } catch (Exception e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    byteBuffer.rewind();
-    return byteBuffer;
-  }
-
-  /**
-   * This method will be used to read from file based on number of bytes to be read
-   *
-   * @param channel file channel
-   * @param size    number of bytes
-   * @return byte buffer
-   */
-  private ByteBuffer read(FileChannel channel, int size) {
-    ByteBuffer byteBuffer = ByteBuffer.allocate(size);
-    try {
-      channel.read(byteBuffer);
-    } catch (Exception e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    byteBuffer.rewind();
-    return byteBuffer;
-  }
-
-
-  /**
-   * This method will be used to read the byte array from file based on length (number of bytes)
-   *
-   * @param filePath fully qualified file path
-   * @param length   number of bytes to be read
-   * @return read byte array
-   */
-  @Override public byte[] readByteArray(String filePath, int length) {
-    FileChannel fileChannel = updateCache(filePath);
-    ByteBuffer byteBuffer = read(fileChannel, length);
-    return byteBuffer.array();
-  }
-
-  /**
-   * This method will be used to read a long from file from position (offset); the
-   * length is always 8 because a long is 8 bytes
-   *
-   * @param filePath fully qualified file path
-   * @param offset   reading start position,
-   * @return read long
-   */
-  @Override public long readLong(String filePath, long offset) {
-    FileChannel fileChannel = updateCache(filePath);
-    ByteBuffer byteBuffer = read(fileChannel, CarbonCommonConstants.LONG_SIZE_IN_BYTE, offset);
-    return byteBuffer.getLong();
-  }
-
-}
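
FileHolderImpl wraps a plain NIO idiom; below is a standalone sketch of that idiom, independent of the Carbon interfaces, with the read loop made explicit because a single FileChannel.read call may return fewer bytes than requested:

  import java.io.IOException;
  import java.io.RandomAccessFile;
  import java.nio.ByteBuffer;
  import java.nio.channels.FileChannel;

  public class PositionalRead {
    static int readIntAt(FileChannel channel, long offset) throws IOException {
      ByteBuffer buffer = ByteBuffer.allocate(4); // an int is 4 bytes
      channel.position(offset);
      // loop until the buffer is full or EOF: read() may be partial
      while (buffer.hasRemaining() && channel.read(buffer) >= 0) {
        // keep reading
      }
      buffer.rewind(); // move position back to 0 before decoding
      return buffer.getInt(); // may underflow if the file is shorter than 4 bytes
    }

    public static void main(String[] args) throws IOException {
      try (RandomAccessFile raf = new RandomAccessFile(args[0], "r")) {
        System.out.println("int at offset 0: " + readIntAt(raf.getChannel(), 0L));
      }
    }
  }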

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/MemoryMappedFileHolderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/MemoryMappedFileHolderImpl.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/MemoryMappedFileHolderImpl.java
deleted file mode 100644
index bb3e7a8..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/MemoryMappedFileHolderImpl.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.datastorage.store.impl;
-
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.MappedByteBuffer;
-import java.nio.channels.FileChannel;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.FileHolder;
-
-public class MemoryMappedFileHolderImpl implements FileHolder {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(MemoryMappedFileHolderImpl.class.getName());
-
-  private Map<String, FileChannel> fileNameAndStreamCache;
-  private Map<String, MappedByteBuffer> fileNameAndMemoryMappedFileCache;
-
-  public MemoryMappedFileHolderImpl() {
-    this(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-  }
-
-  public MemoryMappedFileHolderImpl(int capacity) {
-    this.fileNameAndStreamCache = new HashMap<String, FileChannel>(capacity);
-    this.fileNameAndMemoryMappedFileCache = new HashMap<String, MappedByteBuffer>(capacity);
-  }
-
-  private MappedByteBuffer updateCache(String filePath) {
-    MappedByteBuffer byteBuffer = fileNameAndMemoryMappedFileCache.get(filePath);
-    try {
-      if (null == byteBuffer) {
-        FileChannel fileChannel = new RandomAccessFile(filePath, "r").getChannel();
-        byteBuffer = fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, fileChannel.size());
-        fileNameAndStreamCache.put(filePath, fileChannel);
-        fileNameAndMemoryMappedFileCache.put(filePath, byteBuffer);
-      }
-    } catch (IOException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return byteBuffer;
-  }
-
-  @Override
-  public byte[] readByteArray(String filePath, long offset, int length) {
-    byte[] dst = new byte[length];
-    // read at the absolute file offset via a duplicate so the shared
-    // buffer's position (used by the sequential reads) stays untouched
-    ByteBuffer buffer = updateCache(filePath).duplicate();
-    buffer.position((int) offset);
-    buffer.get(dst, 0, length);
-    return dst;
-  }
-
-  @Override
-  public byte[] readByteArray(String filePath, int length) {
-    byte[] dst = new byte[length];
-    updateCache(filePath).get(dst);
-    return dst;
-  }
-
-  @Override
-  public int readInt(String filePath, long offset) {
-    byte[] dst = readByteArray(filePath, offset, CarbonCommonConstants.INT_SIZE_IN_BYTE);
-    return ByteBuffer.wrap(dst).getInt();
-  }
-
-  @Override
-  public long readLong(String filePath, long offset) {
-    byte[] dst = readByteArray(filePath, offset, CarbonCommonConstants.LONG_SIZE_IN_BYTE);
-    return ByteBuffer.wrap(dst).getLong();
-  }
-
-  @Override
-  public int readInt(String filePath) {
-    return updateCache(filePath).getInt();
-  }
-
-  @Override
-  public long readDouble(String filePath, long offset) {
-    byte[] dst = readByteArray(filePath, offset, CarbonCommonConstants.LONG_SIZE_IN_BYTE);
-    return ByteBuffer.wrap(dst).getLong();
-  }
-
-  @Override
-  public void finish() {
-    fileNameAndMemoryMappedFileCache.clear();
-    for (Entry<String, FileChannel> entry : fileNameAndStreamCache.entrySet()) {
-      try {
-        FileChannel channel = entry.getValue();
-        if (null != channel) {
-          channel.close();
-        }
-      } catch (IOException exception) {
-        LOGGER.error(exception, exception.getMessage());
-      }
-    }
-  }
-}
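
Positional reads over a shared mapping are safest through duplicate(), which gives an independent cursor over the same bytes; a standalone sketch of that idiom, where the offset and field width are illustrative:

  import java.io.IOException;
  import java.io.RandomAccessFile;
  import java.nio.ByteBuffer;
  import java.nio.MappedByteBuffer;
  import java.nio.channels.FileChannel;

  public class MappedRead {
    public static void main(String[] args) throws IOException {
      try (RandomAccessFile raf = new RandomAccessFile(args[0], "r");
           FileChannel channel = raf.getChannel()) {
        MappedByteBuffer mapped =
            channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size());
        ByteBuffer view = mapped.duplicate(); // independent cursor, same bytes
        view.position(16);                    // illustrative offset
        byte[] dst = new byte[8];
        view.get(dst, 0, dst.length);         // fills dst from the view's position
        System.out.println(ByteBuffer.wrap(dst).getLong());
      }
    }
  }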

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/compressed/AbstractHeavyCompressedDoubleArrayDataStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/compressed/AbstractHeavyCompressedDoubleArrayDataStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/compressed/AbstractHeavyCompressedDoubleArrayDataStore.java
deleted file mode 100644
index f2fc0dd..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/compressed/AbstractHeavyCompressedDoubleArrayDataStore.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.data.compressed;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.NodeMeasureDataStore;
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonWriteDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public abstract class AbstractHeavyCompressedDoubleArrayDataStore
-    implements NodeMeasureDataStore //NodeMeasureDataStore<double[]>
-{
-
-  /**
-   * values.
-   */
-  protected ValueCompressonHolder.UnCompressValue[] values;
-
-  /**
-   * compressionModel.
-   */
-  protected ValueCompressionModel compressionModel;
-
-  /**
-   * type
-   */
-  private char[] type;
-
-  /**
-   * AbstractHeavyCompressedDoubleArrayDataStore constructor.
-   *
-   * @param compressionModel
-   */
-  public AbstractHeavyCompressedDoubleArrayDataStore(ValueCompressionModel compressionModel) {
-    this.compressionModel = compressionModel;
-    if (null != compressionModel) {
-      this.type = compressionModel.getType();
-      values =
-          new ValueCompressonHolder.UnCompressValue[compressionModel.getUnCompressValues().length];
-    }
-  }
-
-  @Override public byte[][] getWritableMeasureDataArray(CarbonWriteDataHolder[] dataHolder) {
-    for (int i = 0; i < compressionModel.getUnCompressValues().length; i++) {
-      values[i] = compressionModel.getUnCompressValues()[i].getNew();
-      if (type[i] != CarbonCommonConstants.BYTE_VALUE_MEASURE
-          && type[i] != CarbonCommonConstants.BIG_DECIMAL_MEASURE) {
-        if (type[i] == CarbonCommonConstants.BIG_INT_MEASURE) {
-          values[i].setValue(ValueCompressionUtil
-              .getCompressedValues(compressionModel.getCompType()[i],
-                  dataHolder[i].getWritableLongValues(), compressionModel.getChangedDataType()[i],
-                  (long) compressionModel.getMaxValue()[i], compressionModel.getDecimal()[i]));
-        } else {
-          values[i].setValue(ValueCompressionUtil
-              .getCompressedValues(compressionModel.getCompType()[i],
-                  dataHolder[i].getWritableDoubleValues(), compressionModel.getChangedDataType()[i],
-                  (double) compressionModel.getMaxValue()[i], compressionModel.getDecimal()[i]));
-        }
-      } else {
-        values[i].setValue(dataHolder[i].getWritableByteArrayValues());
-      }
-      values[i] = values[i].compress();
-    }
-    byte[][] returnValue = new byte[values.length][];
-    for (int i = 0; i < values.length; i++) {
-      returnValue[i] = values[i].getBackArrayData();
-    }
-    return returnValue;
-  }
-
-  @Override public short getLength() {
-    return values != null ? (short) values.length : 0;
-  }
-
-}
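
For background on the UnCompressMaxMin* classes that appear in the file summary below: a toy sketch of the max-min idea, storing (max - value) deltas so values cluster near zero. This is a simplification for illustration, not the project's actual codec:

  public class MaxMinCodec {
    // store (max - value) so values near the max become small integers;
    // assumes every delta fits in a byte (the real code picks the type)
    static byte[] encode(double[] values, double max) {
      byte[] out = new byte[values.length];
      for (int i = 0; i < values.length; i++) {
        out[i] = (byte) (max - values[i]);
      }
      return out;
    }

    // decode by subtracting each stored delta from the max again
    static double[] decode(byte[] deltas, double max) {
      double[] out = new double[deltas.length];
      for (int i = 0; i < deltas.length; i++) {
        out[i] = max - deltas[i];
      }
      return out;
    }

    public static void main(String[] args) {
      double[] v = {100.0, 98.0, 97.0};
      byte[] enc = encode(v, 100.0);
      System.out.println(decode(enc, 100.0)[1]); // prints 98.0
    }
  }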


[51/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
Renamed packages to org.apache.carbondata and fixed errors


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/cd6a4ff3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/cd6a4ff3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/cd6a4ff3

Branch: refs/heads/master
Commit: cd6a4ff33a0a7ab97e820d82d2aa3044351d0cae
Parents: d545910
Author: ravipesala <ra...@gmail.com>
Authored: Sun Aug 14 22:10:43 2016 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Sun Aug 14 22:10:49 2016 +0530

----------------------------------------------------------------------
 .../carbondata/common/CarbonIterator.java       |   38 +
 .../carbondata/common/logging/LogService.java   |   47 +
 .../common/logging/LogServiceFactory.java       |   46 +
 .../impl/AuditExtendedRollingFileAppender.java  |   41 +
 .../common/logging/impl/AuditLevel.java         |   61 +
 .../impl/ExtendedRollingFileAppender.java       |  239 +
 .../common/logging/impl/FileUtil.java           |   96 +
 .../common/logging/impl/StandardLogService.java |  317 +
 .../common/logging/impl/StatisticLevel.java     |   64 +
 .../org/carbondata/common/CarbonIterator.java   |   38 -
 .../carbondata/common/logging/LogService.java   |   47 -
 .../common/logging/LogServiceFactory.java       |   46 -
 .../impl/AuditExtendedRollingFileAppender.java  |   41 -
 .../common/logging/impl/AuditLevel.java         |   61 -
 .../impl/ExtendedRollingFileAppender.java       |  239 -
 .../common/logging/impl/FileUtil.java           |   96 -
 .../common/logging/impl/StandardLogService.java |  317 -
 .../common/logging/impl/StatisticLevel.java     |   64 -
 common/src/test/java/log4j.properties           |    4 +-
 .../logging/LogServiceFactoryTest_UT.java       |   42 +
 .../logging/ft/LoggingServiceTest_FT.java       |   92 +
 ...AuditExtendedRollingFileAppenderTest_UT.java |   75 +
 .../common/logging/impl/AuditLevelTest_UT.java  |   48 +
 .../ExtendedRollingFileAppenderTest_UT.java     |   71 +
 .../common/logging/impl/FileUtilTest_UT.java    |   62 +
 .../logging/impl/StandardLogServiceTest_UT.java |  157 +
 .../logging/LogServiceFactoryTest_UT.java       |   42 -
 .../logging/ft/LoggingServiceTest_FT.java       |   92 -
 ...AuditExtendedRollingFileAppenderTest_UT.java |   78 -
 .../common/logging/impl/AuditLevelTest_UT.java  |   50 -
 .../ExtendedRollingFileAppenderTest_UT.java     |   74 -
 .../common/logging/impl/FileUtilTest_UT.java    |   62 -
 .../logging/impl/StandardLogServiceTest_UT.java |  159 -
 .../common/ext/ColumnUniqueIdGenerator.java     |   41 +
 .../common/ext/DictionaryFactory.java           |  119 +
 .../carbondata/common/ext/PathFactory.java      |   48 +
 .../common/factory/CarbonCommonFactory.java     |   54 +
 .../org/apache/carbondata/core/cache/Cache.java |   71 +
 .../carbondata/core/cache/CacheProvider.java    |  154 +
 .../apache/carbondata/core/cache/CacheType.java |   62 +
 .../apache/carbondata/core/cache/Cacheable.java |   50 +
 .../carbondata/core/cache/CarbonLRUCache.java   |  251 +
 .../AbstractColumnDictionaryInfo.java           |  279 +
 .../dictionary/AbstractDictionaryCache.java     |  297 +
 .../cache/dictionary/ColumnDictionaryInfo.java  |  283 +
 .../dictionary/ColumnReverseDictionaryInfo.java |  116 +
 .../core/cache/dictionary/Dictionary.java       |  100 +
 .../dictionary/DictionaryByteArrayWrapper.java  |   94 +
 .../cache/dictionary/DictionaryCacheLoader.java |   45 +
 .../dictionary/DictionaryCacheLoaderImpl.java   |  142 +
 .../dictionary/DictionaryChunksWrapper.java     |  127 +
 .../DictionaryColumnUniqueIdentifier.java       |  113 +
 .../core/cache/dictionary/DictionaryInfo.java   |   91 +
 .../cache/dictionary/ForwardDictionary.java     |  153 +
 .../dictionary/ForwardDictionaryCache.java      |  210 +
 .../cache/dictionary/ReverseDictionary.java     |  129 +
 .../dictionary/ReverseDictionaryCache.java      |  211 +
 .../core/carbon/AbsoluteTableIdentifier.java    |  111 +
 .../core/carbon/CarbonDataLoadSchema.java       |  207 +
 .../core/carbon/CarbonTableIdentifier.java      |  131 +
 .../core/carbon/ColumnIdentifier.java           |  113 +
 .../core/carbon/datastore/BTreeBuilderInfo.java |   61 +
 .../core/carbon/datastore/BlockIndexStore.java  |  309 +
 .../core/carbon/datastore/BtreeBuilder.java     |   38 +
 .../core/carbon/datastore/DataRefNode.java      |  105 +
 .../carbon/datastore/DataRefNodeFinder.java     |   45 +
 .../core/carbon/datastore/IndexKey.java         |   62 +
 .../carbon/datastore/SegmentTaskIndexStore.java |  334 +
 .../carbon/datastore/block/AbstractIndex.java   |   70 +
 .../core/carbon/datastore/block/BlockIndex.java |   53 +
 .../carbon/datastore/block/Distributable.java   |   25 +
 .../datastore/block/SegmentProperties.java      |  748 ++
 .../datastore/block/SegmentTaskIndex.java       |   58 +
 .../carbon/datastore/block/TableBlockInfo.java  |  204 +
 .../carbon/datastore/block/TableTaskInfo.java   |  114 +
 .../carbon/datastore/block/TaskBlockInfo.java   |   68 +
 .../chunk/DimensionChunkAttributes.java         |  102 +
 .../chunk/DimensionColumnDataChunk.java         |   71 +
 .../datastore/chunk/MeasureColumnDataChunk.java |   71 +
 .../impl/ColumnGroupDimensionDataChunk.java     |  128 +
 .../impl/FixedLengthDimensionDataChunk.java     |  123 +
 .../impl/VariableLengthDimensionDataChunk.java  |  114 +
 .../reader/DimensionColumnChunkReader.java      |   48 +
 .../chunk/reader/MeasureColumnChunkReader.java  |   47 +
 .../reader/dimension/AbstractChunkReader.java   |  143 +
 ...CompressedDimensionChunkFileBasedReader.java |  135 +
 .../measure/AbstractMeasureChunkReader.java     |   75 +
 .../CompressedMeasureChunkFileBasedReader.java  |   92 +
 .../exception/IndexBuilderException.java        |   96 +
 .../impl/btree/AbstractBTreeBuilder.java        |  165 +
 .../impl/btree/AbstractBTreeLeafNode.java       |  221 +
 .../impl/btree/BTreeDataRefNodeFinder.java      |  264 +
 .../carbon/datastore/impl/btree/BTreeNode.java  |   71 +
 .../datastore/impl/btree/BTreeNonLeafNode.java  |  232 +
 .../datastore/impl/btree/BlockBTreeBuilder.java |  118 +
 .../impl/btree/BlockBTreeLeafNode.java          |   64 +
 .../impl/btree/BlockletBTreeBuilder.java        |  104 +
 .../impl/btree/BlockletBTreeLeafNode.java       |  132 +
 .../core/carbon/metadata/CarbonMetadata.java    |  165 +
 .../carbon/metadata/blocklet/BlockletInfo.java  |  114 +
 .../metadata/blocklet/DataFileFooter.java       |  170 +
 .../carbon/metadata/blocklet/SegmentInfo.java   |   72 +
 .../compressor/ChunkCompressorMeta.java         |   92 +
 .../blocklet/compressor/CompressionCodec.java   |   31 +
 .../metadata/blocklet/datachunk/DataChunk.java  |  327 +
 .../blocklet/datachunk/PresenceMeta.java        |   66 +
 .../blocklet/index/BlockletBTreeIndex.java      |   76 +
 .../metadata/blocklet/index/BlockletIndex.java  |   77 +
 .../blocklet/index/BlockletMinMaxIndex.java     |   83 +
 .../metadata/blocklet/sort/SortState.java       |   38 +
 .../metadata/converter/SchemaConverter.java     |  105 +
 .../ThriftWrapperSchemaConverterImpl.java       |  386 +
 .../carbon/metadata/datatype/ConvertedType.java |  122 +
 .../core/carbon/metadata/datatype/DataType.java |   48 +
 .../core/carbon/metadata/encoder/Encoding.java  |   31 +
 .../carbon/metadata/index/BlockIndexInfo.java   |   92 +
 .../carbon/metadata/schema/SchemaEvolution.java |   52 +
 .../metadata/schema/SchemaEvolutionEntry.java   |   93 +
 .../metadata/schema/table/CarbonTable.java      |  393 +
 .../carbon/metadata/schema/table/TableInfo.java |  239 +
 .../metadata/schema/table/TableSchema.java      |  185 +
 .../schema/table/column/CarbonColumn.java       |  174 +
 .../schema/table/column/CarbonDimension.java    |  154 +
 .../schema/table/column/CarbonMeasure.java      |  112 +
 .../schema/table/column/ColumnSchema.java       |  418 +
 .../carbon/path/CarbonSharedDictionaryPath.java |   73 +
 .../core/carbon/path/CarbonStorePath.java       |   69 +
 .../core/carbon/path/CarbonTablePath.java       |  425 +
 .../carbon/querystatistics/QueryStatistic.java  |   85 +
 .../QueryStatisticsRecorder.java                |   74 +
 .../core/constants/CarbonCommonConstants.java   |  892 ++
 .../core/constants/IgnoreDictionary.java        |   52 +
 .../core/datastorage/store/FileHolder.java      |   87 +
 .../datastorage/store/MeasureDataWrapper.java   |   30 +
 .../core/datastorage/store/NodeKeyStore.java    |   64 +
 .../datastorage/store/NodeMeasureDataStore.java |   41 +
 .../columnar/BlockIndexerStorageForInt.java     |  226 +
 .../BlockIndexerStorageForNoInvertedIndex.java  |  159 +
 .../store/columnar/ColumnGroupModel.java        |  116 +
 .../store/columnar/ColumnWithIntIndex.java      |   82 +
 .../columnar/ColumnWithIntIndexForHighCard.java |   49 +
 .../store/columnar/ColumnarKeyStore.java        |   47 +
 .../columnar/ColumnarKeyStoreDataHolder.java    |   97 +
 .../store/columnar/ColumnarKeyStoreInfo.java    |  262 +
 .../columnar/ColumnarKeyStoreMetadata.java      |  150 +
 .../store/columnar/IndexStorage.java            |   44 +
 .../store/columnar/UnBlockIndexer.java          |   78 +
 .../store/compression/Compressor.java           |   28 +
 .../store/compression/MeasureMetaDataModel.java |  217 +
 .../store/compression/SnappyCompression.java    |  273 +
 .../compression/ValueCompressionModel.java      |  236 +
 .../compression/ValueCompressonHolder.java      |  135 +
 .../compression/type/UnCompressByteArray.java   |  137 +
 .../compression/type/UnCompressDefaultLong.java |   51 +
 .../compression/type/UnCompressMaxMinByte.java  |  107 +
 .../type/UnCompressMaxMinByteForLong.java       |   78 +
 .../type/UnCompressMaxMinDefault.java           |  108 +
 .../type/UnCompressMaxMinDefaultLong.java       |   75 +
 .../compression/type/UnCompressMaxMinFloat.java |  107 +
 .../compression/type/UnCompressMaxMinInt.java   |  105 +
 .../compression/type/UnCompressMaxMinLong.java  |  105 +
 .../compression/type/UnCompressMaxMinShort.java |  106 +
 .../type/UnCompressNonDecimalByte.java          |   97 +
 .../type/UnCompressNonDecimalDefault.java       |   97 +
 .../type/UnCompressNonDecimalFloat.java         |  101 +
 .../type/UnCompressNonDecimalInt.java           |   98 +
 .../type/UnCompressNonDecimalLong.java          |  100 +
 .../type/UnCompressNonDecimalMaxMinByte.java    |  108 +
 .../type/UnCompressNonDecimalMaxMinDefault.java |  106 +
 .../type/UnCompressNonDecimalMaxMinFloat.java   |  108 +
 .../type/UnCompressNonDecimalMaxMinInt.java     |  108 +
 .../type/UnCompressNonDecimalMaxMinLong.java    |  110 +
 .../type/UnCompressNonDecimalMaxMinShort.java   |  108 +
 .../type/UnCompressNonDecimalShort.java         |   99 +
 .../compression/type/UnCompressNoneByte.java    |  100 +
 .../compression/type/UnCompressNoneDefault.java |   96 +
 .../compression/type/UnCompressNoneFloat.java   |  101 +
 .../compression/type/UnCompressNoneInt.java     |  101 +
 .../compression/type/UnCompressNoneLong.java    |  101 +
 .../compression/type/UnCompressNoneShort.java   |  104 +
 .../store/dataholder/CarbonReadDataHolder.java  |  115 +
 .../store/dataholder/CarbonWriteDataHolder.java |  188 +
 .../store/filesystem/AbstractDFSCarbonFile.java |  217 +
 .../store/filesystem/CarbonFile.java            |   66 +
 .../store/filesystem/CarbonFileFilter.java      |   24 +
 .../store/filesystem/HDFSCarbonFile.java        |  128 +
 .../store/filesystem/LocalCarbonFile.java       |  226 +
 .../store/filesystem/ViewFSCarbonFile.java      |  126 +
 .../impl/CompressedDataMeasureDataWrapper.java  |   37 +
 .../store/impl/DFSFileHolderImpl.java           |  183 +
 .../datastorage/store/impl/FileFactory.java     |  477 +
 .../datastorage/store/impl/FileHolderImpl.java  |  221 +
 .../store/impl/MemoryMappedFileHolderImpl.java  |  118 +
 ...ractHeavyCompressedDoubleArrayDataStore.java |   94 +
 ...HeavyCompressedDoubleArrayDataFileStore.java |  110 +
 ...yCompressedDoubleArrayDataInMemoryStore.java |   76 +
 .../AbstractDoubleArrayDataStore.java           |   82 +
 .../uncompressed/DoubleArrayDataFileStore.java  |   86 +
 .../DoubleArrayDataInMemoryStore.java           |  163 +
 .../key/columnar/AbstractColumnarKeyStore.java  |  106 +
 .../CompressedColumnarFileKeyStore.java         |  168 +
 .../CompressedColumnarInMemoryStore.java        |  155 +
 .../CompressedColumnarKeyStoreUtil.java         |  108 +
 .../UnCompressedColumnarFileKeyStore.java       |   88 +
 .../UnCompressedColumnarInMemoryStore.java      |   70 +
 .../AbstractCompressedSingleArrayStore.java     |  119 +
 .../CompressedSingleArrayKeyFileStore.java      |   92 +
 .../CompressedSingleArrayKeyInMemoryStore.java  |   46 +
 .../AbstractSingleArrayKeyStore.java            |  107 +
 .../uncompressed/SingleArrayKeyFileStore.java   |  104 +
 .../SingleArrayKeyInMemoryStore.java            |   36 +
 .../core/datastorage/util/StoreFactory.java     |   62 +
 .../core/keygenerator/KeyGenException.java      |   45 +
 .../core/keygenerator/KeyGenerator.java         |  124 +
 .../keygenerator/columnar/ColumnarSplitter.java |  103 +
 .../MultiDimKeyVarLengthEquiSplitGenerator.java |  244 +
 ...tiDimKeyVarLengthVariableSplitGenerator.java |  239 +
 .../DirectDictionaryGenerator.java              |   56 +
 .../DirectDictionaryKeyGeneratorFactory.java    |   53 +
 .../TimeStampDirectDictionaryGenerator.java     |  215 +
 .../TimeStampGranularityConstants.java          |   54 +
 .../TimeStampGranularityTypeValue.java          |   63 +
 .../factory/KeyGeneratorFactory.java            |   56 +
 .../mdkey/AbstractKeyGenerator.java             |   79 +
 .../core/keygenerator/mdkey/Bits.java           |  327 +
 .../mdkey/MultiDimKeyVarLengthGenerator.java    |  117 +
 .../keygenerator/mdkey/NumberCompressor.java    |  220 +
 .../carbondata/core/load/BlockDetails.java      |   78 +
 .../core/load/LoadMetadataDetails.java          |  226 +
 .../carbondata/core/metadata/BlockletInfo.java  |  202 +
 .../core/metadata/BlockletInfoColumnar.java     |  405 +
 .../core/metadata/ValueEncoderMeta.java         |  104 +
 .../reader/CarbonDictionaryColumnMetaChunk.java |  107 +
 .../reader/CarbonDictionaryMetadataReader.java  |   58 +
 .../CarbonDictionaryMetadataReaderImpl.java     |  201 +
 .../core/reader/CarbonDictionaryReader.java     |   70 +
 .../core/reader/CarbonDictionaryReaderImpl.java |  314 +
 .../core/reader/CarbonFooterReader.java         |   78 +
 .../core/reader/CarbonIndexFileReader.java      |   95 +
 .../carbondata/core/reader/ThriftReader.java    |  146 +
 .../CarbonDictionarySortIndexReader.java        |   47 +
 .../CarbonDictionarySortIndexReaderImpl.java    |  228 +
 .../core/service/ColumnUniqueIdService.java     |   34 +
 .../core/service/DictionaryService.java         |   92 +
 .../carbondata/core/service/PathService.java    |   38 +
 .../apache/carbondata/core/util/ByteUtil.java   |  320 +
 .../core/util/CarbonFileFolderComparator.java   |   51 +
 .../core/util/CarbonLoadStatisticsDummy.java    |  104 +
 .../core/util/CarbonLoadStatisticsImpl.java     |  413 +
 .../carbondata/core/util/CarbonMergerUtil.java  |   49 +
 .../core/util/CarbonMetadataUtil.java           |  450 +
 .../carbondata/core/util/CarbonProperties.java  |  494 ++
 .../core/util/CarbonTimeStatisticsFactory.java  |   52 +
 .../apache/carbondata/core/util/CarbonUtil.java | 1428 +++
 .../core/util/CarbonUtilException.java          |   80 +
 .../core/util/DataFileFooterConverter.java      |  475 +
 .../carbondata/core/util/DataTypeUtil.java      |  410 +
 .../carbondata/core/util/LoadStatistics.java    |   63 +
 .../core/util/ValueCompressionUtil.java         | 1027 +++
 .../carbondata/core/writer/ByteArrayHolder.java |   77 +
 .../core/writer/CarbonDictionaryWriter.java     |   63 +
 .../core/writer/CarbonDictionaryWriterImpl.java |  422 +
 .../core/writer/CarbonFooterWriter.java         |   72 +
 .../core/writer/CarbonIndexFileWriter.java      |   64 +
 .../core/writer/HierarchyValueWriterForCSV.java |  320 +
 .../carbondata/core/writer/ThriftWriter.java    |  119 +
 .../exception/CarbonDataWriterException.java    |   81 +
 .../CarbonDictionarySortIndexWriter.java        |   48 +
 .../CarbonDictionarySortIndexWriterImpl.java    |  215 +
 .../sortindex/CarbonDictionarySortInfo.java     |   65 +
 .../CarbonDictionarySortInfoPreparator.java     |  150 +
 .../sortindex/CarbonDictionarySortModel.java    |  179 +
 .../scan/collector/ScannedResultCollector.java  |   38 +
 .../impl/AbstractScannedResultCollector.java    |  157 +
 .../impl/DictionaryBasedResultCollector.java    |  130 +
 .../collector/impl/RawBasedResultCollector.java |   67 +
 .../scan/complextypes/ArrayQueryType.java       |  158 +
 .../scan/complextypes/ComplexQueryType.java     |   80 +
 .../scan/complextypes/PrimitiveQueryType.java   |  175 +
 .../scan/complextypes/StructQueryType.java      |  184 +
 .../carbondata/scan/executor/QueryExecutor.java |   40 +
 .../scan/executor/QueryExecutorFactory.java     |   33 +
 .../exception/QueryExecutionException.java      |   96 +
 .../executor/impl/AbstractQueryExecutor.java    |  412 +
 .../scan/executor/impl/DetailQueryExecutor.java |   42 +
 .../executor/impl/QueryExecutorProperties.java  |   90 +
 .../scan/executor/infos/AggregatorInfo.java     |  149 +
 .../scan/executor/infos/BlockExecutionInfo.java |  681 ++
 .../scan/executor/infos/KeyStructureInfo.java   |  119 +
 .../scan/executor/infos/SortInfo.java           |  125 +
 .../scan/executor/util/QueryUtil.java           |  951 ++
 .../scan/executor/util/RestructureUtil.java     |  135 +
 .../scan/expression/BinaryExpression.java       |   59 +
 .../scan/expression/ColumnExpression.java       |  114 +
 .../carbondata/scan/expression/Expression.java  |   50 +
 .../scan/expression/ExpressionResult.java       |  472 +
 .../scan/expression/LeafExpression.java         |   24 +
 .../scan/expression/LiteralExpression.java      |   69 +
 .../scan/expression/UnaryExpression.java        |   33 +
 .../scan/expression/UnknownExpression.java      |   28 +
 .../expression/arithmetic/AddExpression.java    |   86 +
 .../arithmetic/BinaryArithmeticExpression.java  |   34 +
 .../expression/arithmetic/DivideExpression.java |   86 +
 .../arithmetic/MultiplyExpression.java          |   87 +
 .../arithmetic/SubstractExpression.java         |   88 +
 .../BinaryConditionalExpression.java            |   37 +
 .../conditional/ConditionalExpression.java      |   43 +
 .../conditional/EqualToExpression.java          |  108 +
 .../GreaterThanEqualToExpression.java           |   90 +
 .../conditional/GreaterThanExpression.java      |   93 +
 .../expression/conditional/InExpression.java    |   98 +
 .../conditional/LessThanEqualToExpression.java  |   92 +
 .../conditional/LessThanExpression.java         |   95 +
 .../expression/conditional/ListExpression.java  |   62 +
 .../conditional/NotEqualsExpression.java        |  104 +
 .../expression/conditional/NotInExpression.java |   97 +
 .../exception/FilterIllegalMemberException.java |   98 +
 .../exception/FilterUnsupportedException.java   |   92 +
 .../scan/expression/logical/AndExpression.java  |   63 +
 .../logical/BinaryLogicalExpression.java        |  127 +
 .../scan/expression/logical/NotExpression.java  |   60 +
 .../scan/expression/logical/OrExpression.java   |   62 +
 .../scan/filter/DimColumnFilterInfo.java        |   61 +
 .../scan/filter/FilterExpressionProcessor.java  |  352 +
 .../carbondata/scan/filter/FilterProcessor.java |   60 +
 .../carbondata/scan/filter/FilterUtil.java      | 1395 +++
 .../scan/filter/GenericQueryType.java           |   73 +
 .../filter/executer/AndFilterExecuterImpl.java  |   62 +
 .../executer/DimColumnExecuterFilterInfo.java   |   32 +
 .../ExcludeColGroupFilterExecuterImpl.java      |  137 +
 .../executer/ExcludeFilterExecuterImpl.java     |  188 +
 .../scan/filter/executer/FilterExecuter.java    |   45 +
 .../IncludeColGroupFilterExecuterImpl.java      |  209 +
 .../executer/IncludeFilterExecuterImpl.java     |  206 +
 .../filter/executer/OrFilterExecuterImpl.java   |   52 +
 .../executer/RestructureFilterExecuterImpl.java |   55 +
 .../executer/RowLevelFilterExecuterImpl.java    |  402 +
 .../RowLevelRangeGrtThanFiterExecuterImpl.java  |  209 +
 ...elRangeGrtrThanEquaToFilterExecuterImpl.java |  200 +
 ...velRangeLessThanEqualFilterExecuterImpl.java |  248 +
 .../RowLevelRangeLessThanFiterExecuterImpl.java |  252 +
 .../RowLevelRangeTypeExecuterFacory.java        |   93 +
 .../scan/filter/intf/ExpressionType.java        |   44 +
 .../scan/filter/intf/FilterExecuterType.java    |   28 +
 .../carbondata/scan/filter/intf/RowImpl.java    |   44 +
 .../carbondata/scan/filter/intf/RowIntf.java    |   31 +
 .../filter/resolver/AndFilterResolverImpl.java  |   56 +
 .../resolver/ConditionalFilterResolverImpl.java |  241 +
 .../filter/resolver/FilterResolverIntf.java     |  102 +
 .../resolver/LogicalFilterResolverImpl.java     |  118 +
 .../resolver/RestructureFilterResolverImpl.java |  206 +
 .../resolver/RowLevelFilterResolverImpl.java    |  141 +
 .../RowLevelRangeFilterResolverImpl.java        |  287 +
 .../metadata/FilterResolverMetadata.java        |   62 +
 .../DimColumnResolvedFilterInfo.java            |  194 +
 .../MeasureColumnResolvedFilterInfo.java        |  105 +
 .../visitable/ResolvedFilterInfoVisitable.java  |   38 +
 .../visitor/CustomTypeDictionaryVisitor.java    |  110 +
 .../visitor/DictionaryColumnVisitor.java        |   74 +
 .../visitor/FilterInfoTypeVisitorFactory.java   |   45 +
 .../visitor/NoDictionaryTypeVisitor.java        |   69 +
 .../visitor/ResolvedFilterInfoVisitorIntf.java  |   40 +
 .../carbondata/scan/model/CarbonQueryPlan.java  |  239 +
 .../carbondata/scan/model/QueryColumn.java      |  109 +
 .../carbondata/scan/model/QueryDimension.java   |   58 +
 .../carbondata/scan/model/QueryMeasure.java     |   61 +
 .../carbondata/scan/model/QueryModel.java       |  507 ++
 .../carbondata/scan/model/QuerySchemaInfo.java  |   86 +
 .../carbondata/scan/model/SortOrderType.java    |   57 +
 .../processor/AbstractDataBlockIterator.java    |  140 +
 .../scan/processor/BlockletIterator.java        |   88 +
 .../scan/processor/BlocksChunkHolder.java       |  125 +
 .../processor/impl/DataBlockIteratorImpl.java   |   63 +
 .../scan/result/AbstractScannedResult.java      |  437 +
 .../carbondata/scan/result/BatchResult.java     |  105 +
 .../apache/carbondata/scan/result/Result.java   |   70 +
 .../result/impl/FilterQueryScannedResult.java   |  147 +
 .../impl/NonFilterQueryScannedResult.java       |  146 +
 .../AbstractDetailQueryResultIterator.java      |  130 +
 .../scan/result/iterator/ChunkRowIterator.java  |   79 +
 .../iterator/DetailQueryResultIterator.java     |   88 +
 .../scan/result/iterator/RawResultIterator.java |  169 +
 .../scan/scanner/AbstractBlockletScanner.java   |   62 +
 .../scan/scanner/BlockletScanner.java           |   41 +
 .../scan/scanner/impl/FilterScanner.java        |  174 +
 .../scan/scanner/impl/NonFilterScanner.java     |   37 +
 .../scan/wrappers/ByteArrayWrapper.java         |  202 +
 .../common/ext/ColumnUniqueIdGenerator.java     |   41 -
 .../common/ext/DictionaryFactory.java           |  119 -
 .../org/carbondata/common/ext/PathFactory.java  |   48 -
 .../common/factory/CarbonCommonFactory.java     |   54 -
 .../java/org/carbondata/core/cache/Cache.java   |   71 -
 .../carbondata/core/cache/CacheProvider.java    |  154 -
 .../org/carbondata/core/cache/CacheType.java    |   62 -
 .../org/carbondata/core/cache/Cacheable.java    |   50 -
 .../carbondata/core/cache/CarbonLRUCache.java   |  251 -
 .../AbstractColumnDictionaryInfo.java           |  279 -
 .../dictionary/AbstractDictionaryCache.java     |  297 -
 .../cache/dictionary/ColumnDictionaryInfo.java  |  283 -
 .../dictionary/ColumnReverseDictionaryInfo.java |  116 -
 .../core/cache/dictionary/Dictionary.java       |  100 -
 .../dictionary/DictionaryByteArrayWrapper.java  |   94 -
 .../cache/dictionary/DictionaryCacheLoader.java |   45 -
 .../dictionary/DictionaryCacheLoaderImpl.java   |  142 -
 .../dictionary/DictionaryChunksWrapper.java     |  127 -
 .../DictionaryColumnUniqueIdentifier.java       |  113 -
 .../core/cache/dictionary/DictionaryInfo.java   |   91 -
 .../cache/dictionary/ForwardDictionary.java     |  153 -
 .../dictionary/ForwardDictionaryCache.java      |  210 -
 .../cache/dictionary/ReverseDictionary.java     |  129 -
 .../dictionary/ReverseDictionaryCache.java      |  211 -
 .../core/carbon/AbsoluteTableIdentifier.java    |  111 -
 .../core/carbon/CarbonDataLoadSchema.java       |  207 -
 .../core/carbon/CarbonTableIdentifier.java      |  131 -
 .../core/carbon/ColumnIdentifier.java           |  113 -
 .../core/carbon/datastore/BTreeBuilderInfo.java |   61 -
 .../core/carbon/datastore/BlockIndexStore.java  |  309 -
 .../core/carbon/datastore/BtreeBuilder.java     |   38 -
 .../core/carbon/datastore/DataRefNode.java      |  105 -
 .../carbon/datastore/DataRefNodeFinder.java     |   45 -
 .../core/carbon/datastore/IndexKey.java         |   62 -
 .../carbon/datastore/SegmentTaskIndexStore.java |  334 -
 .../carbon/datastore/block/AbstractIndex.java   |   70 -
 .../core/carbon/datastore/block/BlockIndex.java |   53 -
 .../carbon/datastore/block/Distributable.java   |   25 -
 .../datastore/block/SegmentProperties.java      |  748 --
 .../datastore/block/SegmentTaskIndex.java       |   58 -
 .../carbon/datastore/block/TableBlockInfo.java  |  204 -
 .../carbon/datastore/block/TableTaskInfo.java   |  114 -
 .../carbon/datastore/block/TaskBlockInfo.java   |   68 -
 .../chunk/DimensionChunkAttributes.java         |  102 -
 .../chunk/DimensionColumnDataChunk.java         |   71 -
 .../datastore/chunk/MeasureColumnDataChunk.java |   71 -
 .../impl/ColumnGroupDimensionDataChunk.java     |  128 -
 .../impl/FixedLengthDimensionDataChunk.java     |  123 -
 .../impl/VariableLengthDimensionDataChunk.java  |  114 -
 .../reader/DimensionColumnChunkReader.java      |   48 -
 .../chunk/reader/MeasureColumnChunkReader.java  |   47 -
 .../reader/dimension/AbstractChunkReader.java   |  143 -
 ...CompressedDimensionChunkFileBasedReader.java |  135 -
 .../measure/AbstractMeasureChunkReader.java     |   75 -
 .../CompressedMeasureChunkFileBasedReader.java  |   92 -
 .../exception/IndexBuilderException.java        |   96 -
 .../impl/btree/AbstractBTreeBuilder.java        |  165 -
 .../impl/btree/AbstractBTreeLeafNode.java       |  221 -
 .../impl/btree/BTreeDataRefNodeFinder.java      |  264 -
 .../carbon/datastore/impl/btree/BTreeNode.java  |   71 -
 .../datastore/impl/btree/BTreeNonLeafNode.java  |  232 -
 .../datastore/impl/btree/BlockBTreeBuilder.java |  118 -
 .../impl/btree/BlockBTreeLeafNode.java          |   64 -
 .../impl/btree/BlockletBTreeBuilder.java        |  104 -
 .../impl/btree/BlockletBTreeLeafNode.java       |  132 -
 .../core/carbon/metadata/CarbonMetadata.java    |  165 -
 .../carbon/metadata/blocklet/BlockletInfo.java  |  114 -
 .../metadata/blocklet/DataFileFooter.java       |  170 -
 .../carbon/metadata/blocklet/SegmentInfo.java   |   72 -
 .../compressor/ChunkCompressorMeta.java         |   92 -
 .../blocklet/compressor/CompressionCodec.java   |   31 -
 .../metadata/blocklet/datachunk/DataChunk.java  |  327 -
 .../blocklet/datachunk/PresenceMeta.java        |   66 -
 .../blocklet/index/BlockletBTreeIndex.java      |   76 -
 .../metadata/blocklet/index/BlockletIndex.java  |   77 -
 .../blocklet/index/BlockletMinMaxIndex.java     |   83 -
 .../metadata/blocklet/sort/SortState.java       |   38 -
 .../metadata/converter/SchemaConverter.java     |  105 -
 .../ThriftWrapperSchemaConverterImpl.java       |  382 -
 .../carbon/metadata/datatype/ConvertedType.java |  122 -
 .../core/carbon/metadata/datatype/DataType.java |   48 -
 .../core/carbon/metadata/encoder/Encoding.java  |   31 -
 .../carbon/metadata/index/BlockIndexInfo.java   |   92 -
 .../carbon/metadata/schema/SchemaEvolution.java |   52 -
 .../metadata/schema/SchemaEvolutionEntry.java   |   93 -
 .../metadata/schema/table/CarbonTable.java      |  399 -
 .../carbon/metadata/schema/table/TableInfo.java |  239 -
 .../metadata/schema/table/TableSchema.java      |  185 -
 .../schema/table/column/CarbonColumn.java       |  174 -
 .../schema/table/column/CarbonDimension.java    |  154 -
 .../schema/table/column/CarbonMeasure.java      |  112 -
 .../schema/table/column/ColumnSchema.java       |  418 -
 .../carbon/path/CarbonSharedDictionaryPath.java |   73 -
 .../core/carbon/path/CarbonStorePath.java       |   69 -
 .../core/carbon/path/CarbonTablePath.java       |  425 -
 .../carbon/querystatistics/QueryStatistic.java  |   85 -
 .../QueryStatisticsRecorder.java                |   74 -
 .../core/constants/CarbonCommonConstants.java   |  892 --
 .../core/constants/IgnoreDictionary.java        |   52 -
 .../core/datastorage/store/FileHolder.java      |   87 -
 .../datastorage/store/MeasureDataWrapper.java   |   30 -
 .../core/datastorage/store/NodeKeyStore.java    |   64 -
 .../datastorage/store/NodeMeasureDataStore.java |   41 -
 .../columnar/BlockIndexerStorageForInt.java     |  226 -
 .../BlockIndexerStorageForNoInvertedIndex.java  |  159 -
 .../store/columnar/ColumnGroupModel.java        |  116 -
 .../store/columnar/ColumnWithIntIndex.java      |   82 -
 .../columnar/ColumnWithIntIndexForHighCard.java |   49 -
 .../store/columnar/ColumnarKeyStore.java        |   47 -
 .../columnar/ColumnarKeyStoreDataHolder.java    |   97 -
 .../store/columnar/ColumnarKeyStoreInfo.java    |  262 -
 .../columnar/ColumnarKeyStoreMetadata.java      |  150 -
 .../store/columnar/IndexStorage.java            |   44 -
 .../store/columnar/UnBlockIndexer.java          |   78 -
 .../store/compression/Compressor.java           |   28 -
 .../store/compression/MeasureMetaDataModel.java |  217 -
 .../store/compression/SnappyCompression.java    |  273 -
 .../compression/ValueCompressionModel.java      |  236 -
 .../compression/ValueCompressonHolder.java      |  135 -
 .../compression/type/UnCompressByteArray.java   |  137 -
 .../compression/type/UnCompressDefaultLong.java |   51 -
 .../compression/type/UnCompressMaxMinByte.java  |  107 -
 .../type/UnCompressMaxMinByteForLong.java       |   78 -
 .../type/UnCompressMaxMinDefault.java           |  108 -
 .../type/UnCompressMaxMinDefaultLong.java       |   75 -
 .../compression/type/UnCompressMaxMinFloat.java |  107 -
 .../compression/type/UnCompressMaxMinInt.java   |  105 -
 .../compression/type/UnCompressMaxMinLong.java  |  105 -
 .../compression/type/UnCompressMaxMinShort.java |  106 -
 .../type/UnCompressNonDecimalByte.java          |   97 -
 .../type/UnCompressNonDecimalDefault.java       |   97 -
 .../type/UnCompressNonDecimalFloat.java         |  101 -
 .../type/UnCompressNonDecimalInt.java           |   98 -
 .../type/UnCompressNonDecimalLong.java          |  100 -
 .../type/UnCompressNonDecimalMaxMinByte.java    |  108 -
 .../type/UnCompressNonDecimalMaxMinDefault.java |  106 -
 .../type/UnCompressNonDecimalMaxMinFloat.java   |  108 -
 .../type/UnCompressNonDecimalMaxMinInt.java     |  108 -
 .../type/UnCompressNonDecimalMaxMinLong.java    |  110 -
 .../type/UnCompressNonDecimalMaxMinShort.java   |  108 -
 .../type/UnCompressNonDecimalShort.java         |   99 -
 .../compression/type/UnCompressNoneByte.java    |  100 -
 .../compression/type/UnCompressNoneDefault.java |   96 -
 .../compression/type/UnCompressNoneFloat.java   |  101 -
 .../compression/type/UnCompressNoneInt.java     |  101 -
 .../compression/type/UnCompressNoneLong.java    |  101 -
 .../compression/type/UnCompressNoneShort.java   |  104 -
 .../store/dataholder/CarbonReadDataHolder.java  |  115 -
 .../store/dataholder/CarbonWriteDataHolder.java |  188 -
 .../store/filesystem/AbstractDFSCarbonFile.java |  217 -
 .../store/filesystem/CarbonFile.java            |   66 -
 .../store/filesystem/CarbonFileFilter.java      |   24 -
 .../store/filesystem/HDFSCarbonFile.java        |  128 -
 .../store/filesystem/LocalCarbonFile.java       |  226 -
 .../store/filesystem/ViewFSCarbonFile.java      |  126 -
 .../impl/CompressedDataMeasureDataWrapper.java  |   37 -
 .../store/impl/DFSFileHolderImpl.java           |  183 -
 .../datastorage/store/impl/FileFactory.java     |  477 -
 .../datastorage/store/impl/FileHolderImpl.java  |  221 -
 .../store/impl/MemoryMappedFileHolderImpl.java  |  118 -
 ...ractHeavyCompressedDoubleArrayDataStore.java |   94 -
 ...HeavyCompressedDoubleArrayDataFileStore.java |  110 -
 ...yCompressedDoubleArrayDataInMemoryStore.java |   76 -
 .../AbstractDoubleArrayDataStore.java           |   82 -
 .../uncompressed/DoubleArrayDataFileStore.java  |   86 -
 .../DoubleArrayDataInMemoryStore.java           |  163 -
 .../key/columnar/AbstractColumnarKeyStore.java  |  106 -
 .../CompressedColumnarFileKeyStore.java         |  168 -
 .../CompressedColumnarInMemoryStore.java        |  155 -
 .../CompressedColumnarKeyStoreUtil.java         |  108 -
 .../UnCompressedColumnarFileKeyStore.java       |   88 -
 .../UnCompressedColumnarInMemoryStore.java      |   70 -
 .../AbstractCompressedSingleArrayStore.java     |  119 -
 .../CompressedSingleArrayKeyFileStore.java      |   92 -
 .../CompressedSingleArrayKeyInMemoryStore.java  |   46 -
 .../AbstractSingleArrayKeyStore.java            |  107 -
 .../uncompressed/SingleArrayKeyFileStore.java   |  104 -
 .../SingleArrayKeyInMemoryStore.java            |   36 -
 .../core/datastorage/util/StoreFactory.java     |   62 -
 .../core/keygenerator/KeyGenException.java      |   45 -
 .../core/keygenerator/KeyGenerator.java         |  124 -
 .../keygenerator/columnar/ColumnarSplitter.java |  103 -
 .../MultiDimKeyVarLengthEquiSplitGenerator.java |  244 -
 ...tiDimKeyVarLengthVariableSplitGenerator.java |  239 -
 .../DirectDictionaryGenerator.java              |   56 -
 .../DirectDictionaryKeyGeneratorFactory.java    |   53 -
 .../TimeStampDirectDictionaryGenerator.java     |  215 -
 .../TimeStampGranularityConstants.java          |   54 -
 .../TimeStampGranularityTypeValue.java          |   63 -
 .../factory/KeyGeneratorFactory.java            |   56 -
 .../mdkey/AbstractKeyGenerator.java             |   79 -
 .../core/keygenerator/mdkey/Bits.java           |  327 -
 .../mdkey/MultiDimKeyVarLengthGenerator.java    |  117 -
 .../keygenerator/mdkey/NumberCompressor.java    |  220 -
 .../org/carbondata/core/load/BlockDetails.java  |   78 -
 .../core/load/LoadMetadataDetails.java          |  226 -
 .../carbondata/core/metadata/BlockletInfo.java  |  202 -
 .../core/metadata/BlockletInfoColumnar.java     |  405 -
 .../core/metadata/ValueEncoderMeta.java         |  104 -
 .../reader/CarbonDictionaryColumnMetaChunk.java |  107 -
 .../reader/CarbonDictionaryMetadataReader.java  |   58 -
 .../CarbonDictionaryMetadataReaderImpl.java     |  201 -
 .../core/reader/CarbonDictionaryReader.java     |   70 -
 .../core/reader/CarbonDictionaryReaderImpl.java |  314 -
 .../core/reader/CarbonFooterReader.java         |   78 -
 .../core/reader/CarbonIndexFileReader.java      |   95 -
 .../carbondata/core/reader/ThriftReader.java    |  146 -
 .../CarbonDictionarySortIndexReader.java        |   47 -
 .../CarbonDictionarySortIndexReaderImpl.java    |  229 -
 .../core/service/ColumnUniqueIdService.java     |   34 -
 .../core/service/DictionaryService.java         |   92 -
 .../carbondata/core/service/PathService.java    |   38 -
 .../java/org/carbondata/core/util/ByteUtil.java |  320 -
 .../core/util/CarbonFileFolderComparator.java   |   51 -
 .../core/util/CarbonLoadStatisticsDummy.java    |  104 -
 .../core/util/CarbonLoadStatisticsImpl.java     |  413 -
 .../carbondata/core/util/CarbonMergerUtil.java  |   49 -
 .../core/util/CarbonMetadataUtil.java           |  450 -
 .../carbondata/core/util/CarbonProperties.java  |  494 --
 .../core/util/CarbonTimeStatisticsFactory.java  |   52 -
 .../org/carbondata/core/util/CarbonUtil.java    | 1426 ---
 .../core/util/CarbonUtilException.java          |   80 -
 .../core/util/DataFileFooterConverter.java      |  467 -
 .../org/carbondata/core/util/DataTypeUtil.java  |  410 -
 .../carbondata/core/util/LoadStatistics.java    |   63 -
 .../core/util/ValueCompressionUtil.java         | 1027 ---
 .../carbondata/core/writer/ByteArrayHolder.java |   77 -
 .../core/writer/CarbonDictionaryWriter.java     |   63 -
 .../core/writer/CarbonDictionaryWriterImpl.java |  422 -
 .../core/writer/CarbonFooterWriter.java         |   71 -
 .../core/writer/CarbonIndexFileWriter.java      |   64 -
 .../core/writer/HierarchyValueWriterForCSV.java |  320 -
 .../carbondata/core/writer/ThriftWriter.java    |  119 -
 .../exception/CarbonDataWriterException.java    |   81 -
 .../CarbonDictionarySortIndexWriter.java        |   48 -
 .../CarbonDictionarySortIndexWriterImpl.java    |  215 -
 .../sortindex/CarbonDictionarySortInfo.java     |   65 -
 .../CarbonDictionarySortInfoPreparator.java     |  150 -
 .../sortindex/CarbonDictionarySortModel.java    |  179 -
 .../scan/collector/ScannedResultCollector.java  |   38 -
 .../impl/AbstractScannedResultCollector.java    |  157 -
 .../impl/DictionaryBasedResultCollector.java    |  130 -
 .../collector/impl/RawBasedResultCollector.java |   67 -
 .../scan/complextypes/ArrayQueryType.java       |  158 -
 .../scan/complextypes/ComplexQueryType.java     |   80 -
 .../scan/complextypes/PrimitiveQueryType.java   |  175 -
 .../scan/complextypes/StructQueryType.java      |  184 -
 .../carbondata/scan/executor/QueryExecutor.java |   40 -
 .../scan/executor/QueryExecutorFactory.java     |   33 -
 .../exception/QueryExecutionException.java      |   96 -
 .../executor/impl/AbstractQueryExecutor.java    |  412 -
 .../scan/executor/impl/DetailQueryExecutor.java |   42 -
 .../executor/impl/QueryExecutorProperties.java  |   90 -
 .../scan/executor/infos/AggregatorInfo.java     |  149 -
 .../scan/executor/infos/BlockExecutionInfo.java |  681 --
 .../scan/executor/infos/KeyStructureInfo.java   |  119 -
 .../scan/executor/infos/SortInfo.java           |  125 -
 .../scan/executor/util/QueryUtil.java           |  951 --
 .../scan/executor/util/RestructureUtil.java     |  135 -
 .../scan/expression/BinaryExpression.java       |   59 -
 .../scan/expression/ColumnExpression.java       |  114 -
 .../carbondata/scan/expression/Expression.java  |   50 -
 .../scan/expression/ExpressionResult.java       |  472 -
 .../scan/expression/LeafExpression.java         |   24 -
 .../scan/expression/LiteralExpression.java      |   69 -
 .../scan/expression/UnaryExpression.java        |   33 -
 .../scan/expression/UnknownExpression.java      |   28 -
 .../expression/arithmetic/AddExpression.java    |   86 -
 .../arithmetic/BinaryArithmeticExpression.java  |   34 -
 .../expression/arithmetic/DivideExpression.java |   86 -
 .../arithmetic/MultiplyExpression.java          |   87 -
 .../arithmetic/SubstractExpression.java         |   88 -
 .../BinaryConditionalExpression.java            |   37 -
 .../conditional/ConditionalExpression.java      |   43 -
 .../conditional/EqualToExpression.java          |  108 -
 .../GreaterThanEqualToExpression.java           |   90 -
 .../conditional/GreaterThanExpression.java      |   93 -
 .../expression/conditional/InExpression.java    |   98 -
 .../conditional/LessThanEqualToExpression.java  |   92 -
 .../conditional/LessThanExpression.java         |   95 -
 .../expression/conditional/ListExpression.java  |   62 -
 .../conditional/NotEqualsExpression.java        |  104 -
 .../expression/conditional/NotInExpression.java |   97 -
 .../exception/FilterIllegalMemberException.java |   98 -
 .../exception/FilterUnsupportedException.java   |   92 -
 .../scan/expression/logical/AndExpression.java  |   63 -
 .../logical/BinaryLogicalExpression.java        |  127 -
 .../scan/expression/logical/NotExpression.java  |   60 -
 .../scan/expression/logical/OrExpression.java   |   62 -
 .../scan/filter/DimColumnFilterInfo.java        |   61 -
 .../scan/filter/FilterExpressionProcessor.java  |  352 -
 .../carbondata/scan/filter/FilterProcessor.java |   60 -
 .../org/carbondata/scan/filter/FilterUtil.java  | 1395 ---
 .../scan/filter/GenericQueryType.java           |   73 -
 .../filter/executer/AndFilterExecuterImpl.java  |   62 -
 .../executer/DimColumnExecuterFilterInfo.java   |   32 -
 .../ExcludeColGroupFilterExecuterImpl.java      |  137 -
 .../executer/ExcludeFilterExecuterImpl.java     |  188 -
 .../scan/filter/executer/FilterExecuter.java    |   45 -
 .../IncludeColGroupFilterExecuterImpl.java      |  209 -
 .../executer/IncludeFilterExecuterImpl.java     |  206 -
 .../filter/executer/OrFilterExecuterImpl.java   |   52 -
 .../executer/RestructureFilterExecuterImpl.java |   55 -
 .../executer/RowLevelFilterExecuterImpl.java    |  402 -
 .../RowLevelRangeGrtThanFiterExecuterImpl.java  |  209 -
 ...elRangeGrtrThanEquaToFilterExecuterImpl.java |  200 -
 ...velRangeLessThanEqualFilterExecuterImpl.java |  248 -
 .../RowLevelRangeLessThanFiterExecuterImpl.java |  252 -
 .../RowLevelRangeTypeExecuterFacory.java        |   93 -
 .../scan/filter/intf/ExpressionType.java        |   44 -
 .../scan/filter/intf/FilterExecuterType.java    |   28 -
 .../carbondata/scan/filter/intf/RowImpl.java    |   44 -
 .../carbondata/scan/filter/intf/RowIntf.java    |   31 -
 .../filter/resolver/AndFilterResolverImpl.java  |   56 -
 .../resolver/ConditionalFilterResolverImpl.java |  241 -
 .../filter/resolver/FilterResolverIntf.java     |  102 -
 .../resolver/LogicalFilterResolverImpl.java     |  118 -
 .../resolver/RestructureFilterResolverImpl.java |  206 -
 .../resolver/RowLevelFilterResolverImpl.java    |  141 -
 .../RowLevelRangeFilterResolverImpl.java        |  287 -
 .../metadata/FilterResolverMetadata.java        |   62 -
 .../DimColumnResolvedFilterInfo.java            |  194 -
 .../MeasureColumnResolvedFilterInfo.java        |  105 -
 .../visitable/ResolvedFilterInfoVisitable.java  |   38 -
 .../visitor/CustomTypeDictionaryVisitor.java    |  110 -
 .../visitor/DictionaryColumnVisitor.java        |   74 -
 .../visitor/FilterInfoTypeVisitorFactory.java   |   45 -
 .../visitor/NoDictionaryTypeVisitor.java        |   69 -
 .../visitor/ResolvedFilterInfoVisitorIntf.java  |   40 -
 .../carbondata/scan/model/CarbonQueryPlan.java  |  239 -
 .../org/carbondata/scan/model/QueryColumn.java  |  109 -
 .../carbondata/scan/model/QueryDimension.java   |   58 -
 .../org/carbondata/scan/model/QueryMeasure.java |   61 -
 .../org/carbondata/scan/model/QueryModel.java   |  507 --
 .../carbondata/scan/model/QuerySchemaInfo.java  |   86 -
 .../carbondata/scan/model/SortOrderType.java    |   57 -
 .../processor/AbstractDataBlockIterator.java    |  140 -
 .../scan/processor/BlockletIterator.java        |   88 -
 .../scan/processor/BlocksChunkHolder.java       |  125 -
 .../processor/impl/DataBlockIteratorImpl.java   |   63 -
 .../scan/result/AbstractScannedResult.java      |  437 -
 .../org/carbondata/scan/result/BatchResult.java |  105 -
 .../java/org/carbondata/scan/result/Result.java |   70 -
 .../result/impl/FilterQueryScannedResult.java   |  147 -
 .../impl/NonFilterQueryScannedResult.java       |  146 -
 .../AbstractDetailQueryResultIterator.java      |  130 -
 .../scan/result/iterator/ChunkRowIterator.java  |   79 -
 .../iterator/DetailQueryResultIterator.java     |   88 -
 .../scan/result/iterator/RawResultIterator.java |  169 -
 .../scan/scanner/AbstractBlockletScanner.java   |   62 -
 .../scan/scanner/BlockletScanner.java           |   41 -
 .../scan/scanner/impl/FilterScanner.java        |  174 -
 .../scan/scanner/impl/NonFilterScanner.java     |   37 -
 .../scan/wrappers/ByteArrayWrapper.java         |  202 -
 .../core/cache/CacheProviderTest.java           |   67 +
 .../dictionary/AbstractDictionaryCacheTest.java |  177 +
 .../dictionary/DictionaryChunksWrapperTest.java |  110 +
 .../dictionary/ForwardDictionaryCacheTest.java  |  278 +
 .../dictionary/ReverseDictionaryCacheTest.java  |  274 +
 .../carbon/datastore/BlockIndexStoreTest.java   |  204 +
 .../datastore/block/SegmentPropertiesTest.java  |  352 +
 .../block/SegmentPropertiesTestUtil.java        |  234 +
 .../impl/btree/BTreeBlockFinderTest.java        |  367 +
 .../carbon/metadata/CarbonMetadataTest.java     |  148 +
 .../metadata/schema/table/CarbonTableTest.java  |  119 +
 .../table/CarbonTableWithComplexTypesTest.java  |  159 +
 .../metadata/schema/table/TableInfoTest.java    |   52 +
 .../metadata/schema/table/TableSchemaTest.java  |   51 +
 .../CarbonFormatDirectoryStructureTest.java     |   73 +
 .../path/CarbonFormatSharedDictionaryTest.java  |   48 +
 ...CarbonDictionarySortIndexReaderImplTest.java |  135 +
 .../carbondata/core/util/ByteUtilTest.java      |  167 +
 .../carbondata/core/util/CarbonUtilTest.java    |   35 +
 .../writer/CarbonDictionaryWriterImplTest.java  |  528 ++
 .../core/writer/CarbonFooterWriterTest.java     |  213 +
 ...CarbonDictionarySortIndexWriterImplTest.java |  158 +
 .../carbondata/scan/QueryExecutor_UT.java       |   35 +
 .../scan/executor/util/QueryUtilTest.java       |  133 +
 .../core/cache/CacheProviderTest.java           |   67 -
 .../dictionary/AbstractDictionaryCacheTest.java |  177 -
 .../dictionary/DictionaryChunksWrapperTest.java |  110 -
 .../dictionary/ForwardDictionaryCacheTest.java  |  278 -
 .../dictionary/ReverseDictionaryCacheTest.java  |  274 -
 .../carbon/datastore/BlockIndexStoreTest.java   |  204 -
 .../datastore/block/SegmentPropertiesTest.java  |  352 -
 .../block/SegmentPropertiesTestUtil.java        |  234 -
 .../impl/btree/BTreeBlockFinderTest.java        |  367 -
 .../carbon/metadata/CarbonMetadataTest.java     |  148 -
 .../metadata/schema/table/CarbonTableTest.java  |  119 -
 .../table/CarbonTableWithComplexTypesTest.java  |  159 -
 .../metadata/schema/table/TableInfoTest.java    |   52 -
 .../metadata/schema/table/TableSchemaTest.java  |   51 -
 .../CarbonFormatDirectoryStructureTest.java     |   73 -
 .../path/CarbonFormatSharedDictionaryTest.java  |   48 -
 ...CarbonDictionarySortIndexReaderImplTest.java |  135 -
 .../org/carbondata/core/util/ByteUtilTest.java  |  167 -
 .../carbondata/core/util/CarbonUtilTest.java    |   35 -
 .../writer/CarbonDictionaryWriterImplTest.java  |  530 --
 .../core/writer/CarbonFooterWriterTest.java     |  213 -
 ...CarbonDictionarySortIndexWriterImplTest.java |  158 -
 .../org/carbondata/scan/QueryExecutor_UT.java   |   35 -
 .../scan/executor/util/QueryUtilTest.java       |  133 -
 dev/findbugs-exclude.xml                        |   10 +-
 dev/java-code-format-template.xml               |    4 +-
 dev/javastyle-config.xml                        |    2 +-
 dev/scalastyle-config.xml                       |    4 +-
 docs/Carbon-Packaging-and-Interfaces.md         |    2 +-
 .../examples/AllDictionaryExample.scala         |   62 +
 .../carbondata/examples/CarbonExample.scala     |   55 +
 .../examples/ComplexTypeExample.scala           |   78 +
 .../examples/DataFrameAPIExample.scala          |   65 +
 .../examples/GenerateDictionaryExample.scala    |   93 +
 .../apache/carbondata/examples/PerfTest.scala   |  331 +
 .../examples/util/AllDictionaryUtil.scala       |  108 +
 .../examples/util/InitForExamples.scala         |   56 +
 .../examples/AllDictionaryExample.scala         |   62 -
 .../org/carbondata/examples/CarbonExample.scala |   55 -
 .../examples/ComplexTypeExample.scala           |   84 -
 .../examples/DataFrameAPIExample.scala          |   65 -
 .../examples/GenerateDictionaryExample.scala    |   93 -
 .../org/carbondata/examples/PerfTest.scala      |  331 -
 .../examples/util/AllDictionaryUtil.scala       |  108 -
 .../examples/util/InitForExamples.scala         |   56 -
 format/src/main/thrift/carbondata.thrift        |    2 +-
 format/src/main/thrift/carbondataindex.thrift   |    2 +-
 format/src/main/thrift/dictionary.thrift        |    2 +-
 format/src/main/thrift/dictionary_meta.thrift   |    2 +-
 format/src/main/thrift/schema.thrift            |    2 +-
 format/src/main/thrift/sort_index.thrift        |    2 +-
 format/src/main/thrift/table_status.thrift      |    2 +-
 .../carbondata/hadoop/CarbonInputFormat.java    |  742 ++
 .../carbondata/hadoop/CarbonInputSplit.java     |   69 +
 .../carbondata/hadoop/CarbonPathFilter.java     |   44 +
 .../carbondata/hadoop/CarbonProjection.java     |   42 +
 .../hadoop/CarbonRawDataInputSplit.java         |   63 +
 .../carbondata/hadoop/CarbonRecordReader.java   |  106 +
 .../hadoop/readsupport/CarbonReadSupport.java   |   46 +
 .../AbstractDictionaryDecodedReadSupport.java   |   87 +
 .../impl/ArrayWritableReadSupport.java          |   50 +
 .../impl/DictionaryDecodedReadSupportImpl.java  |   35 +
 .../readsupport/impl/RawDataReadSupport.java    |   49 +
 .../hadoop/util/CarbonInputFormatUtil.java      |  131 +
 .../hadoop/util/ObjectSerializationUtil.java    |  117 +
 .../carbondata/hadoop/util/SchemaReader.java    |   69 +
 .../carbondata/hadoop/CarbonInputFormat.java    |  742 --
 .../org/carbondata/hadoop/CarbonInputSplit.java |   69 -
 .../org/carbondata/hadoop/CarbonPathFilter.java |   44 -
 .../org/carbondata/hadoop/CarbonProjection.java |   42 -
 .../hadoop/CarbonRawDataInputSplit.java         |   63 -
 .../carbondata/hadoop/CarbonRecordReader.java   |  106 -
 .../hadoop/readsupport/CarbonReadSupport.java   |   46 -
 .../AbstractDictionaryDecodedReadSupport.java   |   87 -
 .../impl/ArrayWritableReadSupport.java          |   50 -
 .../impl/DictionaryDecodedReadSupportImpl.java  |   35 -
 .../readsupport/impl/RawDataReadSupport.java    |   49 -
 .../hadoop/util/CarbonInputFormatUtil.java      |  131 -
 .../hadoop/util/ObjectSerializationUtil.java    |  117 -
 .../carbondata/hadoop/util/SchemaReader.java    |   69 -
 .../hadoop/ft/CarbonInputFormat_FT.java         |   81 +
 .../hadoop/ft/CarbonInputMapperTest.java        |  189 +
 .../test/util/ObjectSerializationUtilTest.java  |   56 +
 .../hadoop/test/util/StoreCreator.java          |  564 ++
 .../hadoop/ft/CarbonInputFormat_FT.java         |   81 -
 .../hadoop/ft/CarbonInputMapperTest.java        |  189 -
 .../test/util/ObjectSerializationUtilTest.java  |   56 -
 .../hadoop/test/util/StoreCreator.java          |  564 --
 .../sql/common/util/CarbonHiveContext.scala     |    4 +-
 .../allqueries/AllDataTypesTestCase1.scala      | 4494 ++++++++++
 .../allqueries/AllDataTypesTestCase2.scala      | 8186 ++++++++++++++++++
 .../allqueries/AllDataTypesTestCase3.scala      | 1472 ++++
 .../allqueries/AllDataTypesTestCase4.scala      | 1956 +++++
 .../allqueries/AllDataTypesTestCase5.scala      | 3268 +++++++
 .../allqueries/AllDataTypesTestCase6.scala      | 2579 ++++++
 .../allqueries/AllDataTypesTestCase1.scala      | 4494 ----------
 .../allqueries/AllDataTypesTestCase2.scala      | 8186 ------------------
 .../allqueries/AllDataTypesTestCase3.scala      | 1472 ----
 .../allqueries/AllDataTypesTestCase4.scala      | 1956 -----
 .../allqueries/AllDataTypesTestCase5.scala      | 3268 -------
 .../allqueries/AllDataTypesTestCase6.scala      | 2579 ------
 .../spark/merger/CarbonCompactionExecutor.java  |  221 +
 .../spark/merger/CarbonCompactionUtil.java      |  132 +
 .../spark/merger/CompactionCallable.java        |   44 +
 .../spark/merger/CompactionType.java            |   28 +
 .../spark/merger/RowResultMerger.java           |  326 +
 .../spark/merger/TupleConversionAdapter.java    |   85 +
 .../MalformedCarbonCommandException.java        |   83 +
 .../carbondata/spark/load/CarbonLoadModel.java  |  527 ++
 .../carbondata/spark/load/CarbonLoaderUtil.java | 1409 +++
 .../spark/load/DeleteLoadFolders.java           |  361 +
 .../spark/load/DeleteLoadFromMetadata.java      |   44 +
 .../spark/load/DeletedLoadMetadata.java         |   53 +
 .../spark/merger/CarbonDataMergerUtil.java      |  729 ++
 .../spark/merger/NodeBlockRelation.java         |   60 +
 .../spark/merger/NodeMultiBlockRelation.java    |   59 +
 .../spark/partition/api/DataPartitioner.java    |   63 +
 .../spark/partition/api/Partition.java          |   51 +
 .../partition/api/impl/CSVFilePartitioner.java  |  365 +
 .../api/impl/DataPartitionerProperties.java     |   90 +
 .../partition/api/impl/DefaultLoadBalancer.java |   78 +
 .../spark/partition/api/impl/PartitionImpl.java |   63 +
 .../api/impl/PartitionMultiFileImpl.java        |   51 +
 .../api/impl/QueryPartitionHelper.java          |  189 +
 .../api/impl/SampleDataPartitionerImpl.java     |  151 +
 .../spark/partition/reader/CSVIterator.java     |   74 +
 .../spark/partition/reader/CSVParser.java       |  559 ++
 .../spark/partition/reader/CSVReader.java       |  496 ++
 .../spark/partition/reader/CSVWriter.java       |  396 +
 .../spark/partition/reader/LineReader.java      |   68 +
 .../spark/partition/reader/ResultSetHelper.java |   87 +
 .../reader/ResultSetHelperService.java          |  327 +
 .../readsupport/SparkRowReadSupportImpl.java    |   66 +
 .../carbondata/spark/splits/TableSplit.java     |  129 +
 .../carbondata/spark/util/CarbonQueryUtil.java  |  255 +
 .../carbondata/spark/util/LoadMetadataUtil.java |  113 +
 .../spark/merger/CarbonCompactionExecutor.java  |  221 -
 .../spark/merger/CarbonCompactionUtil.java      |  132 -
 .../spark/merger/CompactionCallable.java        |   44 -
 .../spark/merger/CompactionType.java            |   28 -
 .../spark/merger/RowResultMerger.java           |  326 -
 .../spark/merger/TupleConversionAdapter.java    |   85 -
 .../MalformedCarbonCommandException.java        |   83 -
 .../carbondata/spark/load/CarbonLoadModel.java  |  527 --
 .../carbondata/spark/load/CarbonLoaderUtil.java | 1395 ---
 .../spark/load/DeleteLoadFolders.java           |  361 -
 .../spark/load/DeleteLoadFromMetadata.java      |   44 -
 .../spark/load/DeletedLoadMetadata.java         |   53 -
 .../spark/merger/CarbonDataMergerUtil.java      |  729 --
 .../spark/merger/NodeBlockRelation.java         |   60 -
 .../spark/merger/NodeMultiBlockRelation.java    |   59 -
 .../spark/partition/api/DataPartitioner.java    |   63 -
 .../spark/partition/api/Partition.java          |   51 -
 .../partition/api/impl/CSVFilePartitioner.java  |  365 -
 .../api/impl/DataPartitionerProperties.java     |   90 -
 .../partition/api/impl/DefaultLoadBalancer.java |   78 -
 .../spark/partition/api/impl/PartitionImpl.java |   63 -
 .../api/impl/PartitionMultiFileImpl.java        |   51 -
 .../api/impl/QueryPartitionHelper.java          |  189 -
 .../api/impl/SampleDataPartitionerImpl.java     |  151 -
 .../spark/partition/reader/CSVIterator.java     |   74 -
 .../spark/partition/reader/CSVParser.java       |  559 --
 .../spark/partition/reader/CSVReader.java       |  496 --
 .../spark/partition/reader/CSVWriter.java       |  396 -
 .../spark/partition/reader/LineReader.java      |   68 -
 .../spark/partition/reader/ResultSetHelper.java |   87 -
 .../reader/ResultSetHelperService.java          |  327 -
 .../readsupport/SparkRowReadSupportImpl.java    |   66 -
 .../org/carbondata/spark/splits/TableSplit.java |  128 -
 .../carbondata/spark/util/CarbonQueryUtil.java  |  255 -
 .../carbondata/spark/util/LoadMetadataUtil.java |  113 -
 .../spark/CarbonColumnValidator.scala           |   36 +
 .../apache/carbondata/spark/CarbonFilters.scala |  364 +
 .../apache/carbondata/spark/CarbonOption.scala  |   39 +
 .../carbondata/spark/CarbonSparkFactory.scala   |   60 +
 .../spark/DictionaryDetailHelper.scala          |   62 +
 .../org/apache/carbondata/spark/KeyVal.scala    |   89 +
 .../spark/csv/CarbonCsvRelation.scala           |  242 +
 .../carbondata/spark/csv/CarbonTextFile.scala   |   58 +
 .../carbondata/spark/csv/DefaultSource.scala    |  175 +
 .../org/apache/carbondata/spark/package.scala   |  124 +
 .../spark/rdd/CarbonCleanFilesRDD.scala         |   83 +
 .../spark/rdd/CarbonDataFrameRDD.scala          |   36 +
 .../spark/rdd/CarbonDataLoadRDD.scala           |  489 ++
 .../spark/rdd/CarbonDataPartitionRDD.scala      |  112 +
 .../spark/rdd/CarbonDataRDDFactory.scala        |  921 ++
 .../spark/rdd/CarbonDeleteLoadByDateRDD.scala   |   94 +
 .../spark/rdd/CarbonDeleteLoadRDD.scala         |   84 +
 .../spark/rdd/CarbonDropTableRDD.scala          |   76 +
 .../spark/rdd/CarbonGlobalDictionaryRDD.scala   |  547 ++
 .../carbondata/spark/rdd/CarbonMergerRDD.scala  |  362 +
 .../carbondata/spark/rdd/CarbonScanRDD.scala    |  290 +
 .../apache/carbondata/spark/rdd/Compactor.scala |  154 +
 .../spark/tasks/DictionaryWriterTask.scala      |  110 +
 .../spark/tasks/SortIndexWriterTask.scala       |   59 +
 .../spark/thriftserver/CarbonThriftServer.scala |   58 +
 .../carbondata/spark/util/CarbonScalaUtil.scala |  166 +
 .../carbondata/spark/util/CommonUtil.scala      |  202 +
 .../spark/util/GlobalDictionaryUtil.scala       |  778 ++
 .../carbondata/spark/util/QueryPlanUtil.scala   |   47 +
 .../apache/spark/sql/CarbonBoundReference.scala |    2 +-
 .../org/apache/spark/sql/CarbonContext.scala    |   11 +-
 .../sql/CarbonDatasourceHadoopRelation.scala    |   12 +-
 .../spark/sql/CarbonDatasourceRelation.scala    |   14 +-
 .../spark/sql/CarbonDictionaryDecoder.scala     |   14 +-
 .../org/apache/spark/sql/CarbonOperators.scala  |   11 +-
 .../org/apache/spark/sql/CarbonSqlParser.scala  |   13 +-
 .../spark/sql/SparkUnknownExpression.scala      |   12 +-
 .../execution/command/carbonTableSchema.scala   |   69 +-
 .../spark/sql/hive/CarbonMetastoreCatalog.scala |   51 +-
 .../spark/sql/hive/CarbonSQLDialect.scala       |    2 +-
 .../spark/sql/hive/CarbonStrategies.scala       |    9 +-
 .../spark/sql/hive/DistributionUtil.scala       |    6 +-
 .../spark/sql/optimizer/CarbonOptimizer.scala   |    2 +-
 .../scala/org/apache/spark/util/FileUtils.scala |    8 +-
 .../org/apache/spark/util/SplitUtils.scala      |    2 +-
 .../spark/CarbonColumnValidator.scala           |   36 -
 .../org/carbondata/spark/CarbonFilters.scala    |  364 -
 .../org/carbondata/spark/CarbonOption.scala     |   39 -
 .../carbondata/spark/CarbonSparkFactory.scala   |   63 -
 .../spark/DictionaryDetailHelper.scala          |   66 -
 .../scala/org/carbondata/spark/KeyVal.scala     |   89 -
 .../spark/csv/CarbonCsvRelation.scala           |  242 -
 .../carbondata/spark/csv/CarbonTextFile.scala   |   63 -
 .../carbondata/spark/csv/DefaultSource.scala    |  175 -
 .../scala/org/carbondata/spark/package.scala    |  124 -
 .../spark/rdd/CarbonCleanFilesRDD.scala         |   83 -
 .../spark/rdd/CarbonDataFrameRDD.scala          |   36 -
 .../spark/rdd/CarbonDataLoadRDD.scala           |  495 --
 .../spark/rdd/CarbonDataPartitionRDD.scala      |  113 -
 .../spark/rdd/CarbonDataRDDFactory.scala        |  921 --
 .../spark/rdd/CarbonDeleteLoadByDateRDD.scala   |   95 -
 .../spark/rdd/CarbonDeleteLoadRDD.scala         |   84 -
 .../spark/rdd/CarbonDropTableRDD.scala          |   76 -
 .../spark/rdd/CarbonGlobalDictionaryRDD.scala   |  546 --
 .../carbondata/spark/rdd/CarbonMergerRDD.scala  |  362 -
 .../carbondata/spark/rdd/CarbonScanRDD.scala    |  290 -
 .../org/carbondata/spark/rdd/Compactor.scala    |  154 -
 .../spark/tasks/DictionaryWriterTask.scala      |  110 -
 .../spark/tasks/SortIndexWriterTask.scala       |   62 -
 .../spark/thriftserver/CarbonThriftServer.scala |   58 -
 .../carbondata/spark/util/CarbonScalaUtil.scala |  166 -
 .../org/carbondata/spark/util/CommonUtil.scala  |  202 -
 .../spark/util/GlobalDictionaryUtil.scala       |  780 --
 .../carbondata/spark/util/QueryPlanUtil.scala   |   47 -
 .../spark/load/CarbonLoaderUtilTest.java        |    6 +-
 .../validation/FileFooterValidator.java         |   30 +-
 .../aggquery/IntegerDataTypeTestCase.scala      |   49 +
 .../spark/testsuite/bigdecimal/TestBigInt.scala |   93 +
 .../TestDimensionWithDecimalDataType.scala      |   61 +
 ...plexPrimitiveTimestampDirectDictionary.scala |   72 +
 .../complexType/TestComplexTypeQuery.scala      |  165 +
 .../complexType/TestCreateTableWithDouble.scala |   95 +
 .../MultiFilesDataLoagdingTestCase.scala        |   59 +
 .../dataload/TestLoadDataWithBlankLine.scala    |   49 +
 .../TestLoadDataWithEmptyArrayColumns.scala     |   66 +
 .../dataload/TestLoadDataWithJunkChars.scala    |   61 +
 .../TestLoadDataWithMaxMinInteger.scala         |   98 +
 .../dataload/TestLoadDataWithNullMeasures.scala |   53 +
 .../testsuite/emptyrow/TestEmptyRows.scala      |   82 +
 .../aggquery/AggregateQueryTestCase.scala       |   45 +
 .../AllDataTypesTestCaseAggregate.scala         |  111 +
 .../aggquery/AverageQueryTestCase.scala         |  112 +
 .../AllDataTypesTestCaseAggregate.scala         | 1162 +++
 .../allqueries/TestQueryWithoutDataLoad.scala   |   63 +
 .../testsuite/bigdecimal/TestAvgForBigInt.scala |   59 +
 .../testsuite/bigdecimal/TestBigDecimal.scala   |  204 +
 .../bigdecimal/TestNullAndEmptyFields.scala     |  110 +
 .../blockprune/BlockPruneQueryTestCase.scala    |  115 +
 .../createtable/TestCreateTableSyntax.scala     |  132 +
 .../DataCompactionCardinalityBoundryTest.scala  |  134 +
 .../datacompaction/DataCompactionLockTest.scala |  129 +
 .../DataCompactionNoDictionaryTest.scala        |  179 +
 .../datacompaction/DataCompactionTest.scala     |  176 +
 .../MajorCompactionIgnoreInMinorTest.scala      |  148 +
 .../MajorCompactionStopsAfterCompaction.scala   |  143 +
 .../dataload/DefaultSourceTestCase.scala        |  105 +
 .../TestDataWithDicExcludeAndInclude.scala      |  106 +
 .../dataload/TestLoadDataWithHiveSyntax.scala   |  601 ++
 ...ataWithMalformedCarbonCommandException.scala |  163 +
 .../dataload/TestLoadDataWithNoMeasure.scala    |  128 +
 .../TestLoadDataWithNotProperInputFile.scala    |   76 +
 .../dataload/TestLoadTblNameIsKeyword.scala     |   96 +
 .../TestNoInvertedIndexLoadAndQuery.scala       |  105 +
 .../dataretention/DataRetentionTestCase.scala   |  257 +
 .../deleteTable/TestDeleteTableNewDDL.scala     |  112 +
 .../detailquery/AllDataTypesTestCase.scala      |   54 +
 .../ColumnGroupDataTypesTestCase.scala          |  149 +
 .../ColumnPropertyValidationTestCase.scala      |   46 +
 .../HighCardinalityDataTypesTestCase.scala      |  250 +
 .../detailquery/IntegerDataTypeTestCase.scala   |   48 +
 .../NoDictionaryColumnTestCase.scala            |   60 +
 ...estampDataTypeDirectDictionaryTestCase.scala |  157 +
 ...TypeDirectDictionaryWithNoDictTestCase.scala |  107 +
 .../TimestampDataTypeNullDataTest.scala         |   92 +
 .../filterexpr/AllDataTypesTestCaseFilter.scala |   66 +
 .../filterexpr/CountStarTestCase.scala          |   73 +
 .../filterexpr/FilterProcessorTestCase.scala    |  298 +
 .../GrtLtFilterProcessorTestCase.scala          |  176 +
 .../filterexpr/IntegerDataTypeTestCase.scala    |   48 +
 .../NullMeasureValueTestCaseFilter.scala        |   58 +
 .../HadoopFSRelationTestCase.scala              |   69 +
 .../joinquery/AllDataTypesTestCaseJoin.scala    |   54 +
 .../joinquery/IntegerDataTypeTestCase.scala     |   48 +
 .../NullMeasureValueTestCaseAggregate.scala     |   85 +
 .../TestNullValueSerialization.scala            |  112 +
 .../sortexpr/AllDataTypesTestCaseSort.scala     |   54 +
 .../sortexpr/IntegerDataTypeTestCase.scala      |   49 +
 .../windowsexpr/WindowsExprTestCase.scala       |   78 +
 .../spark/util/AllDictionaryTestCase.scala      |  140 +
 .../AutoHighCardinalityIdentifyTestCase.scala   |  178 +
 .../spark/util/DictionaryTestCaseUtil.scala     |   51 +
 .../util/ExternalColumnDictionaryTestCase.scala |  213 +
 ...GlobalDictionaryUtilConcurrentTestCase.scala |  179 +
 .../util/GlobalDictionaryUtilTestCase.scala     |  214 +
 .../apache/spark/sql/TestCarbonSqlParser.scala  |    2 +-
 .../sql/common/util/CarbonHiveContext.scala     |    4 +-
 ...plexPrimitiveTimestampDirectDictionary.scala |   72 -
 .../complexType/TestComplexTypeQuery.scala      |  165 -
 .../complexType/TestCreateTableWithDouble.scala |   95 -
 .../dataload/TestLoadDataWithBlankLine.scala    |   49 -
 .../TestLoadDataWithEmptyArrayColumns.scala     |   66 -
 .../dataload/TestLoadDataWithJunkChars.scala    |   61 -
 .../TestLoadDataWithMaxMinInteger.scala         |   98 -
 .../dataload/TestLoadDataWithNullMeasures.scala |   53 -
 .../aggquery/AggregateQueryTestCase.scala       |   45 -
 .../AllDataTypesTestCaseAggregate.scala         |  111 -
 .../aggquery/AverageQueryTestCase.scala         |  110 -
 .../aggquery/IntegerDataTypeTestCase.scala      |   49 -
 .../AllDataTypesTestCaseAggregate.scala         | 1162 ---
 .../allqueries/TestQueryWithoutDataLoad.scala   |   63 -
 .../testsuite/bigdecimal/TestAvgForBigInt.scala |   59 -
 .../testsuite/bigdecimal/TestBigDecimal.scala   |  204 -
 .../spark/testsuite/bigdecimal/TestBigInt.scala |   93 -
 .../TestDimensionWithDecimalDataType.scala      |   61 -
 .../bigdecimal/TestNullAndEmptyFields.scala     |  110 -
 .../blockprune/BlockPruneQueryTestCase.scala    |  115 -
 .../createtable/TestCreateTableSyntax.scala     |  132 -
 .../DataCompactionCardinalityBoundryTest.scala  |  134 -
 .../datacompaction/DataCompactionLockTest.scala |  129 -
 .../DataCompactionNoDictionaryTest.scala        |  179 -
 .../datacompaction/DataCompactionTest.scala     |  176 -
 .../MajorCompactionIgnoreInMinorTest.scala      |  148 -
 .../MajorCompactionStopsAfterCompaction.scala   |  143 -
 .../dataload/DefaultSourceTestCase.scala        |  105 -
 .../MultiFilesDataLoagdingTestCase.scala        |   59 -
 .../TestDataWithDicExcludeAndInclude.scala      |  107 -
 .../dataload/TestLoadDataWithHiveSyntax.scala   |  600 --
 ...ataWithMalformedCarbonCommandException.scala |  163 -
 .../dataload/TestLoadDataWithNoMeasure.scala    |  128 -
 .../TestLoadDataWithNotProperInputFile.scala    |   78 -
 .../dataload/TestLoadTblNameIsKeyword.scala     |   96 -
 .../TestNoInvertedIndexLoadAndQuery.scala       |  105 -
 .../dataretention/DataRetentionTestCase.scala   |  257 -
 .../deleteTable/TestDeleteTableNewDDL.scala     |  112 -
 .../detailquery/AllDataTypesTestCase.scala      |   54 -
 .../ColumnGroupDataTypesTestCase.scala          |  149 -
 .../ColumnPropertyValidationTestCase.scala      |   46 -
 .../HighCardinalityDataTypesTestCase.scala      |  250 -
 .../detailquery/IntegerDataTypeTestCase.scala   |   48 -
 .../NoDictionaryColumnTestCase.scala            |   60 -
 ...estampDataTypeDirectDictionaryTestCase.scala |  157 -
 ...TypeDirectDictionaryWithNoDictTestCase.scala |  107 -
 .../TimestampDataTypeNullDataTest.scala         |   92 -
 .../testsuite/emptyrow/TestEmptyRows.scala      |   82 -
 .../filterexpr/AllDataTypesTestCaseFilter.scala |   66 -
 .../filterexpr/CountStarTestCase.scala          |   72 -
 .../filterexpr/FilterProcessorTestCase.scala    |  294 -
 .../GrtLtFilterProcessorTestCase.scala          |  176 -
 .../filterexpr/IntegerDataTypeTestCase.scala    |   48 -
 .../NullMeasureValueTestCaseFilter.scala        |   58 -
 .../HadoopFSRelationTestCase.scala              |   69 -
 .../joinquery/AllDataTypesTestCaseJoin.scala    |   54 -
 .../joinquery/IntegerDataTypeTestCase.scala     |   48 -
 .../NullMeasureValueTestCaseAggregate.scala     |   84 -
 .../TestNullValueSerialization.scala            |  112 -
 .../sortexpr/AllDataTypesTestCaseSort.scala     |   54 -
 .../sortexpr/IntegerDataTypeTestCase.scala      |   49 -
 .../windowsexpr/WindowsExprTestCase.scala       |   78 -
 .../spark/util/AllDictionaryTestCase.scala      |  140 -
 .../AutoHighCardinalityIdentifyTestCase.scala   |  186 -
 .../spark/util/DictionaryTestCaseUtil.scala     |   52 -
 .../util/ExternalColumnDictionaryTestCase.scala |  213 -
 ...GlobalDictionaryUtilConcurrentTestCase.scala |  177 -
 .../util/GlobalDictionaryUtilTestCase.scala     |  214 -
 .../plugin.xml                                  |    2 +-
 .../carbonautoagggraphgenerator/plugin.xml      |    2 +-
 .../steps/carbonautoaggslicemerger/plugin.xml   |    2 +-
 .../steps/carboncsvbasedseqgen/plugin.xml       |    2 +-
 .../plugins/steps/carboncsvreader/plugin.xml    |    2 +-
 .../steps/carboncsvreaderstrep/plugin.xml       |    2 +-
 .../plugins/steps/carbondatawriter/plugin.xml   |    2 +-
 .../plugins/steps/carbonfactreader/plugin.xml   |    2 +-
 .../plugins/steps/carbongroupby/plugin.xml      |    2 +-
 .../steps/carboninmemoryfactreader/plugin.xml   |    2 +-
 .../plugins/steps/carbonseqgen/plugin.xml       |    2 +-
 .../plugins/steps/carbonslicemerger/plugin.xml  |    2 +-
 .../steps/carbonsortkeyandgroupby/plugin.xml    |    2 +-
 .../plugins/steps/mdkeygenstep/plugin.xml       |    2 +-
 .../plugins/steps/sortkeystep/plugin.xml        |    2 +-
 processing/pom.xml                              |   10 +-
 .../fileoperations/AtomicFileOperations.java    |   33 +
 .../AtomicFileOperationsImpl.java               |   87 +
 .../lcm/fileoperations/FileWriteOperation.java  |   25 +
 .../lcm/locks/AbstractCarbonLock.java           |   77 +
 .../carbondata/lcm/locks/CarbonLockFactory.java |   94 +
 .../carbondata/lcm/locks/HdfsFileLock.java      |  106 +
 .../carbondata/lcm/locks/ICarbonLock.java       |   40 +
 .../carbondata/lcm/locks/LocalFileLock.java     |  159 +
 .../apache/carbondata/lcm/locks/LockUsage.java  |   31 +
 .../carbondata/lcm/locks/ZooKeeperLocking.java  |  195 +
 .../carbondata/lcm/locks/ZookeeperInit.java     |   82 +
 .../lcm/status/SegmentStatusManager.java        |  507 ++
 .../api/dataloader/DataLoadModel.java           |  201 +
 .../processing/api/dataloader/SchemaInfo.java   |  191 +
 .../constants/DataProcessorConstants.java       |   59 +
 .../processing/csvload/DataGraphExecuter.java   |  635 ++
 .../processing/csvload/GraphExecutionUtil.java  |  362 +
 .../processing/csvreaderstep/CsvInput.java      |  528 ++
 .../processing/csvreaderstep/CsvInputData.java  |   49 +
 .../processing/csvreaderstep/CsvInputMeta.java  |  937 ++
 .../csvreaderstep/CustomDataStream.java         |  126 +
 .../csvreaderstep/UnivocityCsvParser.java       |  176 +
 .../csvreaderstep/UnivocityCsvParserVo.java     |  184 +
 .../csvreaderstep/step-attributes.xml           |  229 +
 .../dataprocessor/DataProcessTaskStatus.java    |  286 +
 .../dataprocessor/IDataProcessStatus.java       |  192 +
 .../manager/CarbonDataProcessorManager.java     |   69 +
 .../processing/dataprocessor/queue/Queue.java   |   47 +
 .../queue/impl/DataProcessorQueue.java          |  106 +
 .../queue/impl/RecordComparator.java            |   45 +
 .../holder/DataProcessorRecordHolder.java       |   65 +
 .../processing/datatypes/ArrayDataType.java     |  289 +
 .../processing/datatypes/GenericDataType.java   |  154 +
 .../processing/datatypes/PrimitiveDataType.java |  268 +
 .../processing/datatypes/StructDataType.java    |  316 +
 .../processing/etl/DataLoadingException.java    |   52 +
 .../exception/CarbonDataProcessorException.java |   80 +
 .../graphgenerator/GraphGenerator.java          |  994 +++
 .../graphgenerator/GraphGeneratorConstants.java |   76 +
 .../graphgenerator/GraphGeneratorException.java |   81 +
 .../configuration/GraphConfigurationInfo.java   | 1061 +++
 .../processing/iterator/CarbonIterator.java     |   38 +
 .../processing/mdkeygen/MDKeyGenStep.java       |  527 ++
 .../processing/mdkeygen/MDKeyGenStepData.java   |   39 +
 .../processing/mdkeygen/MDKeyGenStepMeta.java   |  582 ++
 .../processing/mdkeygen/file/FileData.java      |   74 +
 .../processing/mdkeygen/file/FileManager.java   |   68 +
 .../mdkeygen/file/IFileManagerComposite.java    |   59 +
 .../mdkeygen/messages/messages_en_US.properties |   22 +
 .../merger/exeception/SliceMergerException.java |   80 +
 .../merger/step/CarbonSliceMergerStep.java      |  161 +
 .../merger/step/CarbonSliceMergerStepData.java  |   43 +
 .../merger/step/CarbonSliceMergerStepMeta.java  |  586 ++
 .../schema/metadata/ArrayWrapper.java           |   64 +
 .../schema/metadata/ColumnSchemaDetails.java    |  102 +
 .../metadata/ColumnSchemaDetailsWrapper.java    |  110 +
 .../processing/schema/metadata/ColumnsInfo.java |  504 ++
 .../schema/metadata/HierarchiesInfo.java        |  130 +
 .../schema/metadata/SortObserver.java           |   42 +
 .../processing/schema/metadata/TableOption.java |   82 +
 .../schema/metadata/TableOptionWrapper.java     |  106 +
 .../CarbonSortKeyAndGroupByException.java       |   89 +
 .../sortdata/AbstractTempSortFileReader.java    |  141 +
 .../sortdata/AbstractTempSortFileWriter.java    |  102 +
 .../sortdata/CompressedTempSortFileReader.java  |   52 +
 .../sortdata/CompressedTempSortFileWriter.java  |   79 +
 .../sortdata/FileMergerParameters.java          |  216 +
 .../sortdata/IntermediateFileMerger.java        |  371 +
 .../sortandgroupby/sortdata/RowComparator.java  |   96 +
 .../sortdata/RowComparatorForNormalDims.java    |   64 +
 .../sortandgroupby/sortdata/SortDataRows.java   |  616 ++
 .../sortdata/SortTempFileChunkHolder.java       |  519 ++
 .../sortdata/SortTempFileChunkWriter.java       |   77 +
 .../sortdata/TempSortFileReader.java            |   39 +
 .../sortdata/TempSortFileReaderFactory.java     |   45 +
 .../sortdata/TempSortFileWriter.java            |   48 +
 .../sortdata/TempSortFileWriterFactory.java     |   43 +
 .../UnCompressedTempSortFileReader.java         |   50 +
 .../UnCompressedTempSortFileWriter.java         |  114 +
 .../sortdatastep/SortKeyStep.java               |  277 +
 .../sortdatastep/SortKeyStepMeta.java           |  550 ++
 .../sortdatastep/SortKeyStepData.java           |   53 +
 .../store/CarbonDataFileAttributes.java         |   90 +
 .../store/CarbonFactDataHandlerColumnar.java    | 1312 +++
 .../store/CarbonFactDataHandlerModel.java       |  393 +
 .../processing/store/CarbonFactHandler.java     |   32 +
 .../processing/store/CarbonKeyBlockHolder.java  |   48 +
 .../store/SingleThreadFinalSortFilesMerger.java |  288 +
 .../store/colgroup/ColGroupBlockStorage.java    |   98 +
 .../store/colgroup/ColGroupDataHolder.java      |  103 +
 .../store/colgroup/ColGroupMinMax.java          |  217 +
 .../store/colgroup/ColumnDataHolder.java        |   40 +
 .../processing/store/colgroup/DataHolder.java   |   40 +
 .../store/messages/messages_en_US.properties    |   22 +
 .../store/writer/AbstractFactDataWriter.java    |  698 ++
 .../store/writer/CarbonFactDataWriter.java      |   89 +
 ...actDataWriterImplForIntIndexAndAggBlock.java |  487 ++
 .../processing/store/writer/NodeHolder.java     |  456 +
 .../exception/CarbonDataWriterException.java    |   81 +
 .../csvbased/BadRecordslogger.java              |  169 +
 .../CarbonCSVBasedDimSurrogateKeyGen.java       |  513 ++
 .../csvbased/CarbonCSVBasedSeqGenData.java      |  173 +
 .../csvbased/CarbonCSVBasedSeqGenMeta.java      | 1707 ++++
 .../csvbased/CarbonCSVBasedSeqGenStep.java      | 1869 ++++
 .../FileStoreSurrogateKeyGenForCSV.java         |  402 +
 .../dbbased/CarbonDimSurrogateKeyGen.java       |  286 +
 .../dbbased/HierarchyValueWriter.java           |  155 +
 .../dbbased/IntArrayWrapper.java                |  110 +
 .../dbbased/messages/messages_en_US.properties  |   61 +
 .../util/CarbonDataProcessorUtil.java           |  284 +
 .../processing/util/CarbonSchemaParser.java     | 1281 +++
 .../processing/util/RemoveDictionaryUtil.java   |  426 +
 .../fileoperations/AtomicFileOperations.java    |   33 -
 .../AtomicFileOperationsImpl.java               |   87 -
 .../lcm/fileoperations/FileWriteOperation.java  |   25 -
 .../lcm/locks/AbstractCarbonLock.java           |   77 -
 .../carbondata/lcm/locks/CarbonLockFactory.java |   94 -
 .../org/carbondata/lcm/locks/HdfsFileLock.java  |  106 -
 .../org/carbondata/lcm/locks/ICarbonLock.java   |   40 -
 .../org/carbondata/lcm/locks/LocalFileLock.java |  159 -
 .../org/carbondata/lcm/locks/LockUsage.java     |   31 -
 .../carbondata/lcm/locks/ZooKeeperLocking.java  |  195 -
 .../org/carbondata/lcm/locks/ZookeeperInit.java |   82 -
 .../lcm/status/SegmentStatusManager.java        |  500 --
 .../api/dataloader/DataLoadModel.java           |  201 -
 .../processing/api/dataloader/SchemaInfo.java   |  191 -
 .../constants/DataProcessorConstants.java       |   59 -
 .../processing/csvload/DataGraphExecuter.java   |  635 --
 .../processing/csvload/GraphExecutionUtil.java  |  362 -
 .../processing/csvreaderstep/CsvInput.java      |  528 --
 .../processing/csvreaderstep/CsvInputData.java  |   49 -
 .../processing/csvreaderstep/CsvInputMeta.java  |  937 --
 .../csvreaderstep/CustomDataStream.java         |  126 -
 .../csvreaderstep/UnivocityCsvParser.java       |  176 -
 .../csvreaderstep/UnivocityCsvParserVo.java     |  184 -
 .../csvreaderstep/step-attributes.xml           |  229 -
 .../dataprocessor/DataProcessTaskStatus.java    |  286 -
 .../dataprocessor/IDataProcessStatus.java       |  192 -
 .../manager/CarbonDataProcessorManager.java     |   69 -
 .../processing/dataprocessor/queue/Queue.java   |   47 -
 .../queue/impl/DataProcessorQueue.java          |  106 -
 .../queue/impl/RecordComparator.java            |   45 -
 .../holder/DataProcessorRecordHolder.java       |   65 -
 .../processing/datatypes/ArrayDataType.java     |  289 -
 .../processing/datatypes/GenericDataType.java   |  154 -
 .../processing/datatypes/PrimitiveDataType.java |  268 -
 .../processing/datatypes/StructDataType.java    |  316 -
 .../processing/etl/DataLoadingException.java    |   52 -
 .../exception/CarbonDataProcessorException.java |   80 -
 .../graphgenerator/GraphGenerator.java          |  987 ---
 .../graphgenerator/GraphGeneratorConstants.java |   76 -
 .../graphgenerator/GraphGeneratorException.java |   81 -
 .../configuration/GraphConfigurationInfo.java   | 1061 ---
 .../processing/iterator/CarbonIterator.java     |   38 -
 .../processing/mdkeygen/MDKeyGenStep.java       |  527 --
 .../processing/mdkeygen/MDKeyGenStepData.java   |   39 -
 .../processing/mdkeygen/MDKeyGenStepMeta.java   |  582 --
 .../processing/mdkeygen/file/FileData.java      |   74 -
 .../processing/mdkeygen/file/FileManager.java   |   68 -
 .../mdkeygen/file/IFileManagerComposite.java    |   59 -
 .../mdkeygen/messages/messages_en_US.properties |   22 -
 .../merger/exeception/SliceMergerException.java |   80 -
 .../merger/step/CarbonSliceMergerStep.java      |  161 -
 .../merger/step/CarbonSliceMergerStepData.java  |   43 -
 .../merger/step/CarbonSliceMergerStepMeta.java  |  586 --
 .../schema/metadata/ArrayWrapper.java           |   64 -
 .../schema/metadata/ColumnSchemaDetails.java    |  102 -
 .../metadata/ColumnSchemaDetailsWrapper.java    |  110 -
 .../processing/schema/metadata/ColumnsInfo.java |  504 --
 .../schema/metadata/HierarchiesInfo.java        |  130 -
 .../schema/metadata/SortObserver.java           |   42 -
 .../processing/schema/metadata/TableOption.java |   82 -
 .../schema/metadata/TableOptionWrapper.java     |  106 -
 .../CarbonSortKeyAndGroupByException.java       |   89 -
 .../sortdata/AbstractTempSortFileReader.java    |  141 -
 .../sortdata/AbstractTempSortFileWriter.java    |  102 -
 .../sortdata/CompressedTempSortFileReader.java  |   52 -
 .../sortdata/CompressedTempSortFileWriter.java  |   79 -
 .../sortdata/FileMergerParameters.java          |  216 -
 .../sortdata/IntermediateFileMerger.java        |  371 -
 .../sortandgroupby/sortdata/RowComparator.java  |   96 -
 .../sortdata/RowComparatorForNormalDims.java    |   64 -
 .../sortandgroupby/sortdata/SortDataRows.java   |  616 --
 .../sortdata/SortTempFileChunkHolder.java       |  519 --
 .../sortdata/SortTempFileChunkWriter.java       |   77 -
 .../sortdata/TempSortFileReader.java            |   39 -
 .../sortdata/TempSortFileReaderFactory.java     |   45 -
 .../sortdata/TempSortFileWriter.java            |   48 -
 .../sortdata/TempSortFileWriterFactory.java     |   43 -
 .../UnCompressedTempSortFileReader.java         |   50 -
 .../UnCompressedTempSortFileWriter.java         |  114 -
 .../sortdatastep/SortKeyStep.java               |  276 -
 .../sortdatastep/SortKeyStepData.java           |   53 -
 .../sortdatastep/SortKeyStepMeta.java           |  549 --
 .../store/CarbonDataFileAttributes.java         |   90 -
 .../store/CarbonFactDataHandlerColumnar.java    | 1312 ---
 .../store/CarbonFactDataHandlerModel.java       |  393 -
 .../processing/store/CarbonFactHandler.java     |   32 -
 .../processing/store/CarbonKeyBlockHolder.java  |   48 -
 .../store/SingleThreadFinalSortFilesMerger.java |  288 -
 .../store/colgroup/ColGroupBlockStorage.java    |   98 -
 .../store/colgroup/ColGroupDataHolder.java      |  103 -
 .../store/colgroup/ColGroupMinMax.java          |  217 -
 .../store/colgroup/ColumnDataHolder.java        |   40 -
 .../processing/store/colgroup/DataHolder.java   |   40 -
 .../store/messages/messages_en_US.properties    |   22 -
 .../store/writer/AbstractFactDataWriter.java    |  698 --
 .../store/writer/CarbonFactDataWriter.java      |   89 -
 ...actDataWriterImplForIntIndexAndAggBlock.java |  487 --
 .../processing/store/writer/NodeHolder.java     |  456 -
 .../exception/CarbonDataWriterException.java    |   81 -
 .../csvbased/BadRecordslogger.java              |  169 -
 .../CarbonCSVBasedDimSurrogateKeyGen.java       |  513 --
 .../csvbased/CarbonCSVBasedSeqGenData.java      |  173 -
 .../csvbased/CarbonCSVBasedSeqGenMeta.java      | 1707 ----
 .../csvbased/CarbonCSVBasedSeqGenStep.java      | 1869 ----
 .../FileStoreSurrogateKeyGenForCSV.java         |  402 -
 .../dbbased/CarbonDimSurrogateKeyGen.java       |  286 -
 .../dbbased/HierarchyValueWriter.java           |  155 -
 .../dbbased/IntArrayWrapper.java                |  110 -
 .../dbbased/messages/messages_en_US.properties  |   61 -
 .../util/CarbonDataProcessorUtil.java           |  284 -
 .../processing/util/CarbonSchemaParser.java     | 1281 ---
 .../processing/util/RemoveDictionaryUtil.java   |  426 -
 .../TimeStampDirectDictionaryGenerator_UT.java  |   75 +
 .../carbondata/lcm/locks/LocalFileLockTest.java |   63 +
 .../lcm/locks/ZooKeeperLockingTest.java         |  143 +
 .../store/colgroup/ColGroupMinMaxTest.java      |  253 +
 .../TimeStampDirectDictionaryGenerator_UT.java  |   75 -
 .../carbondata/lcm/locks/LocalFileLockTest.java |   66 -
 .../lcm/locks/ZooKeeperLockingTest.java         |  143 -
 .../store/colgroup/ColGroupMinMaxTest.java      |  257 -
 1398 files changed, 134329 insertions(+), 134344 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/apache/carbondata/common/CarbonIterator.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/carbondata/common/CarbonIterator.java b/common/src/main/java/org/apache/carbondata/common/CarbonIterator.java
new file mode 100644
index 0000000..9141bcd
--- /dev/null
+++ b/common/src/main/java/org/apache/carbondata/common/CarbonIterator.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common;
+
+import java.util.Iterator;
+
+/**
+ * CarbonIterator provides a default implementation of remove(); this is required for Java 7.
+ * @param <E> the type of elements returned by this iterator
+ */
+public abstract class CarbonIterator<E> implements Iterator<E> {
+
+  @Override public abstract boolean hasNext();
+
+  @Override public abstract E next();
+
+  @Override public void remove() {
+    throw new UnsupportedOperationException("remove");
+  }
+
+}
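
For reference, a minimal sketch of how this base class is meant to be extended; the subclass name and the in-memory row source are illustrative, not part of this commit:

    import java.util.List;

    import org.apache.carbondata.common.CarbonIterator;

    /** Illustrative subclass: iterates over an in-memory list of rows. */
    class RowListIterator extends CarbonIterator<Object[]> {

      private final List<Object[]> rows;
      private int cursor;

      RowListIterator(List<Object[]> rows) {
        this.rows = rows;
      }

      @Override public boolean hasNext() {
        return cursor < rows.size();
      }

      @Override public Object[] next() {
        return rows.get(cursor++);
      }
      // remove() is inherited and throws UnsupportedOperationException.
    }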

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/apache/carbondata/common/logging/LogService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/carbondata/common/logging/LogService.java b/common/src/main/java/org/apache/carbondata/common/logging/LogService.java
new file mode 100644
index 0000000..70e6fbe
--- /dev/null
+++ b/common/src/main/java/org/apache/carbondata/common/logging/LogService.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging;
+
+/**
+ * Interface for the CarbonData log services.
+ */
+public interface LogService {
+
+  void debug(String message);
+
+  void info(String message);
+
+  void warn(String message);
+
+  void error(String message);
+
+  void error(Throwable throwable);
+
+  void error(Throwable throwable, String message);
+
+  void audit(String message);
+
+  /**
+   * Logs the given statistics message.
+   *
+   * @param message statistic message
+   */
+  void statistic(String message);
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/apache/carbondata/common/logging/LogServiceFactory.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/carbondata/common/logging/LogServiceFactory.java b/common/src/main/java/org/apache/carbondata/common/logging/LogServiceFactory.java
new file mode 100644
index 0000000..693c2a8
--- /dev/null
+++ b/common/src/main/java/org/apache/carbondata/common/logging/LogServiceFactory.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging;
+
+import org.apache.carbondata.common.logging.impl.StandardLogService;
+
+/**
+ * Log Service factory
+ */
+public final class LogServiceFactory {
+  private LogServiceFactory() {
+
+  }
+
+  /**
+   * return Logger Service.
+   *
+   * @param className provides class name
+   * @return LogService
+   */
+  public static LogService getLogService(final String className) {
+    return new StandardLogService(className);
+  }
+
+  public static LogService getLogService() {
+    return new StandardLogService();
+  }
+
+}
\ No newline at end of file
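
A typical call site for the factory and service, as a sketch (the enclosing class and messages are illustrative, not part of this commit):

    import org.apache.carbondata.common.logging.LogService;
    import org.apache.carbondata.common.logging.LogServiceFactory;

    class DictionaryLoader {

      private static final LogService LOGGER =
          LogServiceFactory.getLogService(DictionaryLoader.class.getName());

      void load() {
        LOGGER.info("starting dictionary load");
        try {
          // ... actual load work would go here ...
        } catch (RuntimeException e) {
          LOGGER.error(e, "dictionary load failed");
        }
      }
    }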

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/apache/carbondata/common/logging/impl/AuditExtendedRollingFileAppender.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/carbondata/common/logging/impl/AuditExtendedRollingFileAppender.java b/common/src/main/java/org/apache/carbondata/common/logging/impl/AuditExtendedRollingFileAppender.java
new file mode 100644
index 0000000..fcdcb4b
--- /dev/null
+++ b/common/src/main/java/org/apache/carbondata/common/logging/impl/AuditExtendedRollingFileAppender.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging.impl;
+
+import org.apache.log4j.spi.LoggingEvent;
+
+/**
+ * Copied from log4j and modified to rename rolled-over files and to restrict
+ * appending to audit logging only.
+ */
+public class AuditExtendedRollingFileAppender extends ExtendedRollingFileAppender {
+
+  /**
+   * Call RollingFileAppender method to append the log...
+   *
+   * @see org.apache.log4j.RollingFileAppender#subAppend(LoggingEvent)
+   */
+  protected void subAppend(LoggingEvent event) {
+    if (event.getLevel().toInt() == AuditLevel.AUDIT.toInt()) {
+      currentLevel = AuditLevel.AUDIT.toInt();
+      super.subAppend(event);
+    }
+  }
+}
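
Because subAppend forwards only events at AuditLevel.AUDIT, this appender can be attached to a broad logger without duplicating regular log output. A sketch of the corresponding log4j.properties wiring; the appender name, file path, size limits, and pattern are assumptions for illustration only:

    log4j.appender.AUDIT=org.apache.carbondata.common.logging.impl.AuditExtendedRollingFileAppender
    # Path, rollover sizes and layout below are illustrative values.
    log4j.appender.AUDIT.File=/var/log/carbondata/audit.log
    log4j.appender.AUDIT.MaxFileSize=10MB
    log4j.appender.AUDIT.MaxBackupIndex=5
    log4j.appender.AUDIT.layout=org.apache.log4j.PatternLayout
    log4j.appender.AUDIT.layout.ConversionPattern=%d [%t] %-5p %c - %m%n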



[30/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/exception/FilterUnsupportedException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/exception/FilterUnsupportedException.java b/core/src/main/java/org/apache/carbondata/scan/expression/exception/FilterUnsupportedException.java
new file mode 100644
index 0000000..fd66555
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/exception/FilterUnsupportedException.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.exception;
+
+import java.util.Locale;
+
+public class FilterUnsupportedException extends Exception {
+
+  /**
+   * default serial version ID.
+   */
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * The Error message.
+   */
+  private String msg = "";
+
+  /**
+   * Constructor
+   *
+   * @param msg The error message for this exception.
+   */
+  public FilterUnsupportedException(String msg) {
+    super(msg);
+    this.msg = msg;
+  }
+
+  /**
+   * Constructor
+   *
+   * @param msg The error message for this exception.
+   * @param t   The cause of this exception.
+   */
+  public FilterUnsupportedException(String msg, Throwable t) {
+    super(msg, t);
+    this.msg = msg;
+  }
+
+  /**
+   * Constructor
+   *
+   * @param t The cause of this exception.
+   */
+  public FilterUnsupportedException(Throwable t) {
+    super(t);
+  }
+
+  /**
+   * This method is used to get the localized message.
+   *
+   * @param locale - A Locale object represents a specific geographical,
+   *               political, or cultural region.
+   * @return - Localized error message.
+   */
+  public String getLocalizedMessage(Locale locale) {
+    return "";
+  }
+
+  /**
+   * getLocalizedMessage
+   */
+  @Override public String getLocalizedMessage() {
+    return super.getLocalizedMessage();
+  }
+
+  /**
+   * getMessage
+   */
+  public String getMessage() {
+    return this.msg;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/logical/AndExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/logical/AndExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/logical/AndExpression.java
new file mode 100644
index 0000000..79120e5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/logical/AndExpression.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.logical;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class AndExpression extends BinaryLogicalExpression {
+
+  private static final long serialVersionUID = 1L;
+
+  public AndExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ExpressionResult resultLeft = left.evaluate(value);
+    ExpressionResult resultRight = right.evaluate(value);
+    switch (resultLeft.getDataType()) {
+      case BOOLEAN:
+        resultLeft.set(DataType.BOOLEAN, (resultLeft.getBoolean() && resultRight.getBoolean()));
+        break;
+      default:
+        throw new FilterUnsupportedException(
+            "Incompatible datatype for applying AND Expression Filter");
+    }
+    return resultLeft;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.AND;
+  }
+
+  @Override public String getString() {
+    return "And(" + left.getString() + ',' + right.getString() + ')';
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/logical/BinaryLogicalExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/logical/BinaryLogicalExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/logical/BinaryLogicalExpression.java
new file mode 100644
index 0000000..1b4a0fc
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/logical/BinaryLogicalExpression.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.logical;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.scan.expression.BinaryExpression;
+import org.apache.carbondata.scan.expression.ColumnExpression;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.LiteralExpression;
+
+public abstract class BinaryLogicalExpression extends BinaryExpression {
+
+  private static final long serialVersionUID = 1L;
+
+  public BinaryLogicalExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  public List<ExpressionResult> getLiterals() {
+    List<ExpressionResult> listOfExp =
+        new ArrayList<ExpressionResult>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    getExpressionResultList(this, listOfExp);
+    Collections.sort(listOfExp);
+    return listOfExp;
+  }
+
+  // Collects the column expressions involved in this expression tree
+  // by traversing it.
+  public List<ColumnExpression> getColumnList() {
+    List<ColumnExpression> listOfExp =
+        new ArrayList<ColumnExpression>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    getColumnList(this, listOfExp);
+    return listOfExp;
+  }
+
+  private void getColumnList(Expression expression, List<ColumnExpression> lst) {
+    if (expression instanceof ColumnExpression) {
+      ColumnExpression colExp = (ColumnExpression) expression;
+      boolean found = false;
+
+      for (ColumnExpression currentColExp : lst) {
+        if (currentColExp.getColumnName().equals(colExp.getColumnName())) {
+          found = true;
+          colExp.setColIndex(currentColExp.getColIndex());
+          break;
+        }
+      }
+      if (!found) {
+        colExp.setColIndex(lst.size());
+        lst.add(colExp);
+      }
+    }
+    for (Expression child : expression.getChildren()) {
+      getColumnList(child, lst);
+    }
+  }
+
+  public boolean isSingleDimension() {
+    List<ColumnExpression> listOfExp =
+        new ArrayList<ColumnExpression>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    getColumnList(this, listOfExp);
+    return listOfExp.size() == 1 && listOfExp.get(0).isDimension();
+  }
+
+  private void getExpressionResultList(Expression binaryConditionalExpression,
+      List<ExpressionResult> listOfExp) {
+    if (binaryConditionalExpression instanceof LiteralExpression) {
+      ExpressionResult colExp =
+          ((LiteralExpression) binaryConditionalExpression).getExpressionResult();
+      listOfExp.add(colExp);
+    }
+    for (Expression child : binaryConditionalExpression.getChildren()) {
+      getExpressionResultList(child, listOfExp);
+    }
+
+  }
+
+  /**
+   * Checks whether the conditional expression involves any direct dictionary
+   * columns, i.e. columns that are not dictionary encoded.
+   *
+   * @return true if such a column exists, false otherwise
+   */
+  public boolean isDirectDictionaryColumns() {
+    List<ColumnExpression> listOfExp =
+        new ArrayList<ColumnExpression>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    getColumnList(this, listOfExp);
+    for (ColumnExpression ce : listOfExp) {
+      if (!ce.getCarbonColumn().hasEncoding(Encoding.DICTIONARY)) {
+        return true;
+      }
+    }
+    return false;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/logical/NotExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/logical/NotExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/logical/NotExpression.java
new file mode 100644
index 0000000..2be732b
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/logical/NotExpression.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.logical;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.UnaryExpression;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class NotExpression extends UnaryExpression {
+  private static final long serialVersionUID = 1L;
+
+  public NotExpression(Expression child) {
+    super(child);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterIllegalMemberException, FilterUnsupportedException {
+    ExpressionResult expResult = child.evaluate(value);
+    switch (expResult.getDataType()) {
+      case BOOLEAN:
+        expResult.set(DataType.BOOLEAN, !(expResult.getBoolean()));
+        break;
+      default:
+        throw new FilterUnsupportedException(
+            "Incompatible datatype for applying NOT Expression Filter");
+    }
+    return expResult;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.NOT;
+  }
+
+  @Override public String getString() {
+    return "Not(" + child.getString() + ')';
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/logical/OrExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/logical/OrExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/logical/OrExpression.java
new file mode 100644
index 0000000..4723fb8
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/logical/OrExpression.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.logical;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class OrExpression extends BinaryLogicalExpression {
+
+  private static final long serialVersionUID = 4220598043176438380L;
+
+  public OrExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterIllegalMemberException, FilterUnsupportedException {
+    ExpressionResult resultLeft = left.evaluate(value);
+    ExpressionResult resultRight = right.evaluate(value);
+    switch (resultLeft.getDataType()) {
+      case BOOLEAN:
+        resultLeft.set(DataType.BOOLEAN, (resultLeft.getBoolean() || resultRight.getBoolean()));
+        break;
+      default:
+        throw new FilterUnsupportedException(
+            "Incompatible datatype for applying OR Expression Filter");
+    }
+
+    return resultLeft;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.OR;
+  }
+
+  @Override public String getString() {
+    return "Or(" + left.getString() + ',' + right.getString() + ')';
+  }
+
+}
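
Taken together, AndExpression, OrExpression and NotExpression compose into filter trees. A sketch building the tree for a = 1 AND (b = 2 OR NOT c); the leaf constructors (ColumnExpression, LiteralExpression, EqualToExpression) and the DataType values are assumed for illustration and are not shown in this commit:

    import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
    import org.apache.carbondata.scan.expression.ColumnExpression;
    import org.apache.carbondata.scan.expression.Expression;
    import org.apache.carbondata.scan.expression.LiteralExpression;
    import org.apache.carbondata.scan.expression.conditional.EqualToExpression;
    import org.apache.carbondata.scan.expression.logical.AndExpression;
    import org.apache.carbondata.scan.expression.logical.NotExpression;
    import org.apache.carbondata.scan.expression.logical.OrExpression;

    class FilterTreeSketch {
      static Expression build() {
        // a = 1 (leaf construction assumed)
        Expression aEquals1 = new EqualToExpression(
            new ColumnExpression("a", DataType.INT),
            new LiteralExpression(1, DataType.INT));
        // b = 2
        Expression bEquals2 = new EqualToExpression(
            new ColumnExpression("b", DataType.INT),
            new LiteralExpression(2, DataType.INT));
        // c, assumed to be a boolean column
        Expression c = new ColumnExpression("c", DataType.BOOLEAN);
        // a = 1 AND (b = 2 OR NOT c)
        return new AndExpression(aEquals1,
            new OrExpression(bEquals2, new NotExpression(c)));
      }
    }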

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/DimColumnFilterInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/DimColumnFilterInfo.java b/core/src/main/java/org/apache/carbondata/scan/filter/DimColumnFilterInfo.java
new file mode 100644
index 0000000..0d51286
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/DimColumnFilterInfo.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.filter;
+
+import java.io.Serializable;
+import java.util.List;
+
+public class DimColumnFilterInfo implements Serializable {
+
+  private static final long serialVersionUID = 8181578747306832771L;
+
+  private boolean isIncludeFilter;
+
+  private List<Integer> filterList;
+
+  /**
+   * maintain the no dictionary filter values list.
+   */
+  private List<byte[]> noDictionaryFilterValuesList;
+
+  public List<byte[]> getNoDictionaryFilterValuesList() {
+    return noDictionaryFilterValuesList;
+  }
+
+  public boolean isIncludeFilter() {
+    return isIncludeFilter;
+  }
+
+  public void setIncludeFilter(boolean isIncludeFilter) {
+    this.isIncludeFilter = isIncludeFilter;
+  }
+
+  public List<Integer> getFilterList() {
+    return filterList;
+  }
+
+  public void setFilterList(List<Integer> filterList) {
+    this.filterList = filterList;
+  }
+
+  public void setFilterListForNoDictionaryCols(List<byte[]> noDictionaryFilterValuesList) {
+    this.noDictionaryFilterValuesList = noDictionaryFilterValuesList;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/FilterExpressionProcessor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/FilterExpressionProcessor.java b/core/src/main/java/org/apache/carbondata/scan/filter/FilterExpressionProcessor.java
new file mode 100644
index 0000000..b17dd3e
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/FilterExpressionProcessor.java
@@ -0,0 +1,352 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter;
+
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.DataRefNode;
+import org.apache.carbondata.core.carbon.datastore.DataRefNodeFinder;
+import org.apache.carbondata.core.carbon.datastore.IndexKey;
+import org.apache.carbondata.core.carbon.datastore.block.AbstractIndex;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.datastore.impl.btree.BTreeDataRefNodeFinder;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.expression.BinaryExpression;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.conditional.BinaryConditionalExpression;
+import org.apache.carbondata.scan.expression.conditional.ConditionalExpression;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.resolver.ConditionalFilterResolverImpl;
+import org.apache.carbondata.scan.filter.resolver.FilterResolverIntf;
+import org.apache.carbondata.scan.filter.resolver.LogicalFilterResolverImpl;
+import org.apache.carbondata.scan.filter.resolver.RowLevelFilterResolverImpl;
+import org.apache.carbondata.scan.filter.resolver.RowLevelRangeFilterResolverImpl;
+
+public class FilterExpressionProcessor implements FilterProcessor {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(FilterExpressionProcessor.class.getName());
+
+  /**
+   * Provides the resolved form of the filters based on the filter expression
+   * tree passed in as an Expression instance.
+   *
+   * @param expressionTree  filter expression tree
+   * @param tableIdentifier contains the carbon store information
+   * @return a filter resolver tree
+   * @throws QueryExecutionException
+   * @throws FilterUnsupportedException
+   */
+  public FilterResolverIntf getFilterResolver(Expression expressionTree,
+      AbsoluteTableIdentifier tableIdentifier) throws FilterUnsupportedException {
+    if (null != expressionTree && null != tableIdentifier) {
+      return getFilterResolverTree(expressionTree, tableIdentifier);
+    }
+    return null;
+  }
+
+  /**
+   * Scans all the segment-level btrees and selects the required block reference
+   * nodes in order to push them to the executor for applying filters on the
+   * respective data reference nodes. The following algorithm is used:
+   * Step 1: Get the start and end key based on the filter tree resolver information.
+   * Step 2: Prepare the IndexKeys in order to scan the tree and get the start and
+   * end reference nodes (blocks).
+   * Step 3: Once the data reference node range is retrieved, traverse the nodes
+   * within this range and select nodes based on the block min/max values and the
+   * filter values.
+   * Step 4: Send the selected blocks to the executors for applying the filters
+   * with the help of the filter executors.
+   *
+   * @throws QueryExecutionException
+   */
+  public List<DataRefNode> getFilterredBlocks(DataRefNode btreeNode,
+      FilterResolverIntf filterResolver, AbstractIndex tableSegment,
+      AbsoluteTableIdentifier tableIdentifier) throws QueryExecutionException {
+    // Need to get the current dimension tables
+    List<DataRefNode> listOfDataBlocksToScan = new ArrayList<DataRefNode>();
+    // getting the start and end index key based on filter for hitting the
+    // selected block reference nodes based on filter resolver tree.
+    LOGGER.debug("preparing the start and end key for finding"
+        + "start and end block as per filter resolver");
+    List<IndexKey> listOfStartEndKeys = new ArrayList<IndexKey>(2);
+    FilterUtil.traverseResolverTreeAndGetStartAndEndKey(tableSegment.getSegmentProperties(),
+        tableIdentifier, filterResolver, listOfStartEndKeys);
+    // reading the first value from list which has start key
+    IndexKey searchStartKey = listOfStartEndKeys.get(0);
+    // reading the last value from list which has end key
+    IndexKey searchEndKey = listOfStartEndKeys.get(1);
+    if (null == searchStartKey && null == searchEndKey) {
+      try {
+        // TODO need to handle for no dictionary dimensions
+        searchStartKey =
+            FilterUtil.prepareDefaultStartIndexKey(tableSegment.getSegmentProperties());
+        // TODO need to handle for no dictionary dimensions
+        searchEndKey = FilterUtil.prepareDefaultEndIndexKey(tableSegment.getSegmentProperties());
+      } catch (KeyGenException e) {
+        return listOfDataBlocksToScan;
+      }
+    }
+
+    LOGGER.debug(
+        "Successfully retrieved the start and end key" + "Dictionary Start Key: " + searchStartKey
+            .getDictionaryKeys() + "No Dictionary Start Key " + searchStartKey.getNoDictionaryKeys()
+            + "Dictionary End Key: " + searchEndKey.getDictionaryKeys() + "No Dictionary End Key "
+            + searchEndKey.getNoDictionaryKeys());
+    long startTimeInMillis = System.currentTimeMillis();
+    DataRefNodeFinder blockFinder = new BTreeDataRefNodeFinder(
+        tableSegment.getSegmentProperties().getEachDimColumnValueSize());
+    DataRefNode startBlock = blockFinder.findFirstDataBlock(btreeNode, searchStartKey);
+    DataRefNode endBlock = blockFinder.findLastDataBlock(btreeNode, searchEndKey);
+    FilterExecuter filterExecuter =
+        FilterUtil.getFilterExecuterTree(filterResolver, tableSegment.getSegmentProperties(),null);
+    while (startBlock != endBlock) {
+      addBlockBasedOnMinMaxValue(filterExecuter, listOfDataBlocksToScan, startBlock,
+          tableSegment.getSegmentProperties());
+      startBlock = startBlock.getNextDataRefNode();
+    }
+    addBlockBasedOnMinMaxValue(filterExecuter, listOfDataBlocksToScan, endBlock,
+        tableSegment.getSegmentProperties());
+    LOGGER.info("Total Time in retrieving the data reference node" + "after scanning the btree " + (
+        System.currentTimeMillis() - startTimeInMillis)
+        + " Total number of data reference node for executing filter(s) " + listOfDataBlocksToScan
+        .size());
+
+    return listOfDataBlocksToScan;
+  }
+
+  /**
+   * Selects the blocks based on the column min and max values.
+   *
+   * @param filterResolver
+   * @param listOfDataBlocksToScan
+   * @param dataRefNode
+   * @param segmentProperties
+   */
+  private void addBlockBasedOnMinMaxValue(FilterExecuter filterExecuter,
+      List<DataRefNode> listOfDataBlocksToScan, DataRefNode dataRefNode,
+      SegmentProperties segmentProperties) {
+
+    BitSet bitSet = filterExecuter
+        .isScanRequired(dataRefNode.getColumnsMaxValue(), dataRefNode.getColumnsMinValue());
+    if (!bitSet.isEmpty()) {
+      listOfDataBlocksToScan.add(dataRefNode);
+
+    }
+  }
+
+  /**
+   * Returns a filter resolver instance which will be used by the executors
+   * to evaluate or execute the filters.
+   *
+   * @param expressionTree filter expression tree from which the resolver
+   *                       tree is built
+   * @return FilterResolverIntf type.
+   * @throws QueryExecutionException
+   * @throws FilterUnsupportedException
+   */
+  private FilterResolverIntf getFilterResolverTree(Expression expressionTree,
+      AbsoluteTableIdentifier tableIdentifier) throws FilterUnsupportedException {
+    FilterResolverIntf filterEvaluatorTree =
+        createFilterResolverTree(expressionTree, tableIdentifier, null);
+    traverseAndResolveTree(filterEvaluatorTree, tableIdentifier);
+    return filterEvaluatorTree;
+  }
+
+  /**
+   * Resolves the filter resolver tree built from the filter expression.
+   * This method visits each node of the filter resolver tree and
+   * prepares the surrogates of the filter members which are involved
+   * in the filter expression.
+   *
+   * @param filterResolverTree
+   * @param tableIdentifier
+   * @throws FilterUnsupportedException
+   * @throws QueryExecutionException
+   */
+  private void traverseAndResolveTree(FilterResolverIntf filterResolverTree,
+      AbsoluteTableIdentifier tableIdentifier) throws FilterUnsupportedException {
+    if (null == filterResolverTree) {
+      return;
+    }
+    traverseAndResolveTree(filterResolverTree.getLeft(), tableIdentifier);
+
+    filterResolverTree.resolve(tableIdentifier);
+
+    traverseAndResolveTree(filterResolverTree.getRight(), tableIdentifier);
+  }
+
+  /**
+   * Pattern used: Visitor Pattern
+   * Creates the filter resolver tree based on the filter expression tree;
+   * the resolver for each node is created based on the expression instance.
+   *
+   * @param expressionTree
+   * @param tableIdentifier
+   * @return
+   */
+  private FilterResolverIntf createFilterResolverTree(Expression expressionTree,
+      AbsoluteTableIdentifier tableIdentifier, Expression intermediateExpression) {
+    ExpressionType filterExpressionType = expressionTree.getFilterExpressionType();
+    BinaryExpression currentExpression = null;
+    switch (filterExpressionType) {
+      case OR:
+        currentExpression = (BinaryExpression) expressionTree;
+        return new LogicalFilterResolverImpl(
+            createFilterResolverTree(currentExpression.getLeft(), tableIdentifier,
+                currentExpression),
+            createFilterResolverTree(currentExpression.getRight(), tableIdentifier,
+                currentExpression),currentExpression);
+      case AND:
+        currentExpression = (BinaryExpression) expressionTree;
+        return new LogicalFilterResolverImpl(
+            createFilterResolverTree(currentExpression.getLeft(), tableIdentifier,
+                currentExpression),
+            createFilterResolverTree(currentExpression.getRight(), tableIdentifier,
+                currentExpression), currentExpression);
+      case EQUALS:
+      case IN:
+        return getFilterResolverBasedOnExpressionType(ExpressionType.EQUALS, false, expressionTree,
+            tableIdentifier, expressionTree);
+      case GREATERTHAN:
+      case GREATERTHAN_EQUALTO:
+      case LESSTHAN:
+      case LESSTHAN_EQUALTO:
+        return getFilterResolverBasedOnExpressionType(ExpressionType.EQUALS, true, expressionTree,
+            tableIdentifier, expressionTree);
+
+      case NOT_EQUALS:
+      case NOT_IN:
+        return getFilterResolverBasedOnExpressionType(ExpressionType.NOT_EQUALS, false,
+            expressionTree, tableIdentifier, expressionTree);
+
+      default:
+        return getFilterResolverBasedOnExpressionType(ExpressionType.UNKNOWN, false, expressionTree,
+            tableIdentifier, expressionTree);
+    }
+  }
+
+  /**
+   * Factory method which returns the resolver instance based on the filter
+   * expression type.
+   */
+  private FilterResolverIntf getFilterResolverBasedOnExpressionType(
+      ExpressionType filterExpressionType, boolean isExpressionResolve, Expression expression,
+      AbsoluteTableIdentifier tableIdentifier, Expression expressionTree) {
+    BinaryConditionalExpression currentCondExpression = null;
+    ConditionalExpression condExpression = null;
+    switch (filterExpressionType) {
+      case EQUALS:
+        currentCondExpression = (BinaryConditionalExpression) expression;
+        if (currentCondExpression.isSingleDimension()
+            && currentCondExpression.getColumnList().get(0).getCarbonColumn().getDataType()
+            != DataType.ARRAY
+            && currentCondExpression.getColumnList().get(0).getCarbonColumn().getDataType()
+            != DataType.STRUCT) {
+          // getting new dim index.
+          if (!currentCondExpression.getColumnList().get(0).getCarbonColumn()
+              .hasEncoding(Encoding.DICTIONARY) || currentCondExpression.getColumnList().get(0)
+              .getCarbonColumn().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+            if (FilterUtil.checkIfExpressionContainsColumn(currentCondExpression.getLeft())
+                && FilterUtil.checkIfExpressionContainsColumn(currentCondExpression.getRight()) || (
+                FilterUtil.checkIfRightExpressionRequireEvaluation(currentCondExpression.getRight())
+                    || FilterUtil
+                    .checkIfLeftExpressionRequireEvaluation(currentCondExpression.getLeft()))) {
+              return new RowLevelFilterResolverImpl(expression, isExpressionResolve, true,
+                  tableIdentifier);
+            }
+            if (currentCondExpression.getFilterExpressionType() == ExpressionType.GREATERTHAN
+                || currentCondExpression.getFilterExpressionType() == ExpressionType.LESSTHAN
+                || currentCondExpression.getFilterExpressionType()
+                == ExpressionType.GREATERTHAN_EQUALTO
+                || currentCondExpression.getFilterExpressionType()
+                == ExpressionType.LESSTHAN_EQUALTO) {
+              return new RowLevelRangeFilterResolverImpl(expression, isExpressionResolve, true,
+                  tableIdentifier);
+            }
+          }
+          return new ConditionalFilterResolverImpl(expression, isExpressionResolve, true);
+
+        }
+        break;
+      case NOT_EQUALS:
+        currentCondExpression = (BinaryConditionalExpression) expression;
+        if (currentCondExpression.isSingleDimension()
+            && currentCondExpression.getColumnList().get(0).getCarbonColumn().getDataType()
+            != DataType.ARRAY
+            && currentCondExpression.getColumnList().get(0).getCarbonColumn().getDataType()
+            != DataType.STRUCT) {
+          if (!currentCondExpression.getColumnList().get(0).getCarbonColumn()
+              .hasEncoding(Encoding.DICTIONARY) || currentCondExpression.getColumnList().get(0)
+              .getCarbonColumn().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+            if (FilterUtil.checkIfExpressionContainsColumn(currentCondExpression.getLeft())
+                && FilterUtil.checkIfExpressionContainsColumn(currentCondExpression.getRight()) || (
+                FilterUtil.checkIfRightExpressionRequireEvaluation(currentCondExpression.getRight())
+                    || FilterUtil
+                    .checkIfLeftExpressionRequireEvaluation(currentCondExpression.getLeft()))) {
+              return new RowLevelFilterResolverImpl(expression, isExpressionResolve, false,
+                  tableIdentifier);
+            }
+            if (expressionTree.getFilterExpressionType() == ExpressionType.GREATERTHAN
+                || expressionTree.getFilterExpressionType() == ExpressionType.LESSTHAN
+                || expressionTree.getFilterExpressionType() == ExpressionType.GREATERTHAN_EQUALTO
+                || expressionTree.getFilterExpressionType() == ExpressionType.LESSTHAN_EQUALTO) {
+
+              return new RowLevelRangeFilterResolverImpl(expression, isExpressionResolve, false,
+                  tableIdentifier);
+            }
+
+            return new ConditionalFilterResolverImpl(expression, isExpressionResolve, false);
+          }
+          return new ConditionalFilterResolverImpl(expression, isExpressionResolve, false);
+        }
+        break;
+      default:
+        condExpression = (ConditionalExpression) expression;
+        if (condExpression.isSingleDimension()
+            && condExpression.getColumnList().get(0).getCarbonColumn().getDataType()
+            != DataType.ARRAY
+            && condExpression.getColumnList().get(0).getCarbonColumn().getDataType()
+            != DataType.STRUCT) {
+          condExpression = (ConditionalExpression) expression;
+          if (condExpression.getColumnList().get(0).getCarbonColumn()
+              .hasEncoding(Encoding.DICTIONARY) && !condExpression.getColumnList().get(0)
+              .getCarbonColumn().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+            return new ConditionalFilterResolverImpl(expression, true, true);
+          } else {
+            return new RowLevelFilterResolverImpl(expression, false, false, tableIdentifier);
+          }
+        } else {
+          return new RowLevelFilterResolverImpl(expression, false, false, tableIdentifier);
+        }
+    }
+    return new RowLevelFilterResolverImpl(expression, false, false, tableIdentifier);
+  }
+
+}
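
For reference, the dispatch in the switch above reduces to a fixed precedence: expressions whose operands both need per-row evaluation become row-level resolvers, the four range comparisons become row-level range resolvers, and everything else stays conditional. A standalone restatement of that ordering (a sketch only; the class and method names here are illustrative and not part of this commit):

    import org.apache.carbondata.scan.filter.intf.ExpressionType;

    final class DispatchOrderSketch {

      enum ResolverKind { ROW_LEVEL, ROW_LEVEL_RANGE, CONDITIONAL }

      // bothSidesNeedEvaluation stands in for the FilterUtil checks in the real code.
      static ResolverKind choose(ExpressionType type, boolean bothSidesNeedEvaluation) {
        if (bothSidesNeedEvaluation) {
          return ResolverKind.ROW_LEVEL;
        }
        switch (type) {
          case GREATERTHAN:
          case GREATERTHAN_EQUALTO:
          case LESSTHAN:
          case LESSTHAN_EQUALTO:
            return ResolverKind.ROW_LEVEL_RANGE;
          default:
            return ResolverKind.CONDITIONAL;
        }
      }
    }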

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/FilterProcessor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/FilterProcessor.java b/core/src/main/java/org/apache/carbondata/scan/filter/FilterProcessor.java
new file mode 100644
index 0000000..35948e3
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/FilterProcessor.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.filter;
+
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.DataRefNode;
+import org.apache.carbondata.core.carbon.datastore.block.AbstractIndex;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.resolver.FilterResolverIntf;
+
+public interface FilterProcessor {
+
+  /**
+   * API will provide the resolved form of filters based on the filter
+   * expression tree which has been passed.
+   *
+   * @param expressionTree  filter expression tree
+   * @param tableIdentifier contains carbon store information
+   * @return the resolved filter tree
+   * @throws FilterUnsupportedException
+   */
+  FilterResolverIntf getFilterResolver(Expression expressionTree,
+      AbsoluteTableIdentifier tableIdentifier) throws FilterUnsupportedException;
+
+  /**
+   * This API is exposed in order to get the required block reference nodes
+   * based on the filter. The block list will be sent to the executor tasks
+   * in order to apply the filters.
+   *
+   * @param filterResolver resolved filter tree used to prune the data blocks
+   * @return list of DataRefNode
+   * @throws QueryExecutionException
+   */
+  List<DataRefNode> getFilterredBlocks(DataRefNode dataRefNode, FilterResolverIntf filterResolver,
+      AbstractIndex segmentIndexBuilder, AbsoluteTableIdentifier tableIdentifier)
+      throws QueryExecutionException;
+
+}
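
A minimal caller sketch for this interface: resolve the expression once, then prune blocks with the resolved tree. Only the two methods declared above are used; the surrounding class and parameter wiring are illustrative assumptions, not part of this commit.

    import java.util.List;

    import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
    import org.apache.carbondata.core.carbon.datastore.DataRefNode;
    import org.apache.carbondata.core.carbon.datastore.block.AbstractIndex;
    import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
    import org.apache.carbondata.scan.expression.Expression;
    import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
    import org.apache.carbondata.scan.filter.FilterProcessor;
    import org.apache.carbondata.scan.filter.resolver.FilterResolverIntf;

    final class FilterProcessorUsageSketch {
      // Resolves the filter expression, then asks for the surviving blocks.
      static List<DataRefNode> prune(FilterProcessor processor, Expression tree,
          AbsoluteTableIdentifier identifier, DataRefNode root, AbstractIndex index)
          throws FilterUnsupportedException, QueryExecutionException {
        FilterResolverIntf resolved = processor.getFilterResolver(tree, identifier);
        return processor.getFilterredBlocks(root, resolved, index, identifier);
      }
    }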



http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/intf/ExpressionType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/intf/ExpressionType.java b/core/src/main/java/org/apache/carbondata/scan/filter/intf/ExpressionType.java
new file mode 100644
index 0000000..4d658fc
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/intf/ExpressionType.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.filter.intf;
+
+public enum ExpressionType {
+
+  AND,
+  OR,
+  NOT,
+  EQUALS,
+  NOT_EQUALS,
+  LESSTHAN,
+  LESSTHAN_EQUALTO,
+  GREATERTHAN,
+  GREATERTHAN_EQUALTO,
+  ADD,
+  SUBSTRACT,
+  DIVIDE,
+  MULTIPLY,
+  IN,
+  LIST,
+  NOT_IN,
+  UNKNOWN,
+  LITERAL,
+  RANGE
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/intf/FilterExecuterType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/intf/FilterExecuterType.java b/core/src/main/java/org/apache/carbondata/scan/filter/intf/FilterExecuterType.java
new file mode 100644
index 0000000..39b7979
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/intf/FilterExecuterType.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.intf;
+
+import java.io.Serializable;
+
+public enum FilterExecuterType implements Serializable {
+
+  INCLUDE, EXCLUDE, OR, AND, RESTRUCTURE, ROWLEVEL, RANGE, ROWLEVEL_GREATERTHAN,
+  ROWLEVEL_GREATERTHAN_EQUALTO, ROWLEVEL_LESSTHAN_EQUALTO, ROWLEVEL_LESSTHAN
+
+}
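
Taken together with ExpressionType above, the executer type is essentially a projection of the expression operator. A compact sketch of that mapping, mirroring the resolver implementations further below (illustrative only; the authoritative logic lives in LogicalFilterResolverImpl and ConditionalFilterResolverImpl):

    import org.apache.carbondata.scan.filter.intf.ExpressionType;
    import org.apache.carbondata.scan.filter.intf.FilterExecuterType;

    final class ExecuterTypeSketch {
      // OR/AND map to logical executers, negations to EXCLUDE, the rest to INCLUDE.
      static FilterExecuterType fromOperator(ExpressionType type) {
        switch (type) {
          case OR:
            return FilterExecuterType.OR;
          case AND:
            return FilterExecuterType.AND;
          case NOT_EQUALS:
          case NOT_IN:
            return FilterExecuterType.EXCLUDE;
          default:
            return FilterExecuterType.INCLUDE;
        }
      }
    }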

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/intf/RowImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/intf/RowImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/intf/RowImpl.java
new file mode 100644
index 0000000..e924e79
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/intf/RowImpl.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.filter.intf;
+
+public class RowImpl implements RowIntf {
+  private Object[] row;
+
+  public RowImpl() {
+    row = new Object[0];
+  }
+
+  @Override public Object getVal(int index) {
+    return row[index];
+  }
+
+  @Override public Object[] getValues() {
+    return row;
+  }
+
+  @Override public void setValues(final Object[] row) {
+    this.row = row;
+  }
+
+  @Override public int size() {
+    return this.row.length;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/intf/RowIntf.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/intf/RowIntf.java b/core/src/main/java/org/apache/carbondata/scan/filter/intf/RowIntf.java
new file mode 100644
index 0000000..d4b2a8f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/intf/RowIntf.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.filter.intf;
+
+public interface RowIntf {
+  Object getVal(int index);
+
+  Object[] getValues();
+
+  void setValues(Object[] setValues);
+
+  int size();
+
+}
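
A trivial, runnable usage sketch of the row abstraction (illustrative only):

    import org.apache.carbondata.scan.filter.intf.RowImpl;
    import org.apache.carbondata.scan.filter.intf.RowIntf;

    final class RowUsageSketch {
      public static void main(String[] args) {
        RowIntf row = new RowImpl();
        row.setValues(new Object[] { "dim-value", 42 });
        // getVal reads by position, size reports the row width; prints "dim-value 42 2".
        System.out.println(row.getVal(0) + " " + row.getVal(1) + " " + row.size());
      }
    }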

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/AndFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/AndFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/AndFilterResolverImpl.java
new file mode 100644
index 0000000..2495621
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/AndFilterResolverImpl.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.resolver;
+
+import java.util.List;
+import java.util.SortedMap;
+
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.expression.BinaryExpression;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+
+public class AndFilterResolverImpl extends LogicalFilterResolverImpl {
+
+  /**
+   * serialization id
+   */
+  private static final long serialVersionUID = -761688076874662001L;
+
+  public AndFilterResolverImpl(FilterResolverIntf leftEvalutor, FilterResolverIntf rightEvalutor,
+      ExpressionType filterExpressionType, BinaryExpression expression) {
+    super(leftEvalutor, rightEvalutor, expression);
+  }
+
+  @Override public void getStartKey(long[] startKeys,
+      SortedMap<Integer, byte[]> noDicStartKeys, List<long[]> startKeyList)
+      throws QueryExecutionException {
+    leftEvalutor.getStartKey(startKeys, noDicStartKeys, startKeyList);
+    rightEvalutor.getStartKey(startKeys, noDicStartKeys, startKeyList);
+  }
+
+  @Override public void getEndKey(SegmentProperties segmentProperties,
+      AbsoluteTableIdentifier tableIdentifier, long[] endKeys,
+      SortedMap<Integer, byte[]> noDicEndKeys, List<long[]> endKeyList)
+      throws QueryExecutionException {
+    leftEvalutor.getEndKey(segmentProperties, tableIdentifier, endKeys, noDicEndKeys, endKeyList);
+    rightEvalutor.getEndKey(segmentProperties, tableIdentifier, endKeys, noDicEndKeys, endKeyList);
+  }
+}
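
Because an AND node narrows the scan range, both children write into the same start/end key accumulators. A hedged sketch of driving the start-key merge (it assumes two already-resolved child resolvers and a known key width; constructing those is out of scope here):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.SortedMap;
    import java.util.TreeMap;

    import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
    import org.apache.carbondata.scan.expression.BinaryExpression;
    import org.apache.carbondata.scan.filter.resolver.AndFilterResolverImpl;
    import org.apache.carbondata.scan.filter.resolver.FilterResolverIntf;

    final class AndStartKeySketch {
      static long[] mergedStartKey(FilterResolverIntf left, FilterResolverIntf right,
          BinaryExpression andExpression, int keyWidth) throws QueryExecutionException {
        AndFilterResolverImpl and = new AndFilterResolverImpl(left, right,
            andExpression.getFilterExpressionType(), andExpression);
        long[] startKeys = new long[keyWidth];
        SortedMap<Integer, byte[]> noDicStartKeys = new TreeMap<Integer, byte[]>();
        List<long[]> startKeyList = new ArrayList<long[]>();
        and.getStartKey(startKeys, noDicStartKeys, startKeyList); // filled by both children
        return startKeys;
      }
    }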

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/ConditionalFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/ConditionalFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/ConditionalFilterResolverImpl.java
new file mode 100644
index 0000000..bcb0a1b
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/ConditionalFilterResolverImpl.java
@@ -0,0 +1,241 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.resolver;
+
+import java.util.List;
+import java.util.SortedMap;
+
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.expression.ColumnExpression;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.conditional.BinaryConditionalExpression;
+import org.apache.carbondata.scan.expression.conditional.ConditionalExpression;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.FilterUtil;
+import org.apache.carbondata.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.visitor.FilterInfoTypeVisitorFactory;
+
+public class ConditionalFilterResolverImpl implements FilterResolverIntf {
+
+  private static final long serialVersionUID = 1838955268462201691L;
+  protected Expression exp;
+  protected boolean isExpressionResolve;
+  protected boolean isIncludeFilter;
+  private DimColumnResolvedFilterInfo dimColResolvedFilterInfo;
+
+  public ConditionalFilterResolverImpl(Expression exp, boolean isExpressionResolve,
+      boolean isIncludeFilter) {
+    this.exp = exp;
+    this.isExpressionResolve = isExpressionResolve;
+    this.isIncludeFilter = isIncludeFilter;
+    this.dimColResolvedFilterInfo = new DimColumnResolvedFilterInfo();
+  }
+
+  /**
+   * This API will resolve the filter expression and generate the
+   * dictionaries for executing/evaluating the filter expressions in the
+   * executer layer.
+   *
+   * @throws FilterUnsupportedException
+   */
+  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier)
+      throws FilterUnsupportedException {
+    FilterResolverMetadata metadata = new FilterResolverMetadata();
+    metadata.setTableIdentifier(absoluteTableIdentifier);
+    if ((!isExpressionResolve) && exp instanceof BinaryConditionalExpression) {
+      BinaryConditionalExpression binaryConditionalExpression = (BinaryConditionalExpression) exp;
+      Expression leftExp = binaryConditionalExpression.getLeft();
+      Expression rightExp = binaryConditionalExpression.getRight();
+      if (leftExp instanceof ColumnExpression) {
+        ColumnExpression columnExpression = (ColumnExpression) leftExp;
+        metadata.setColumnExpression(columnExpression);
+        metadata.setExpression(rightExp);
+        metadata.setIncludeFilter(isIncludeFilter);
+        // If a self-comparison such as imei=imei comes in the filter condition
+        // then we need to skip processing of the right expression.
+        // This flow has reached here assuming that this is a single
+        // column expression.
+        // We need to check in depth whether the other expression
+        // contains a column expression or not.
+        CarbonDimension dimension = columnExpression.getDimension();
+        if (FilterUtil.checkIfExpressionContainsColumn(rightExp)
+            || FilterUtil.isExpressionNeedsToResolved(rightExp, isIncludeFilter) &&
+            dimension.hasEncoding(Encoding.DICTIONARY) && !dimension
+            .hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+          isExpressionResolve = true;
+        } else {
+          // The visitor pattern is used here in order to populate the
+          // dimColResolvedFilterInfo visitable object with filter member values
+          // based on the visitor type. Currently there are three types of
+          // visitors: custom, direct and no dictionary. Each visitor populates
+          // the visitable instance according to its own business logic.
+          dimColResolvedFilterInfo.populateFilterInfoBasedOnColumnType(
+              FilterInfoTypeVisitorFactory.getResolvedFilterInfoVisitor(columnExpression),
+              metadata);
+        }
+      } else if (rightExp instanceof ColumnExpression) {
+        ColumnExpression columnExpression = (ColumnExpression) rightExp;
+        metadata.setColumnExpression(columnExpression);
+        metadata.setExpression(leftExp);
+        metadata.setIncludeFilter(isIncludeFilter);
+        if (columnExpression.getDataType().equals(DataType.TIMESTAMP)) {
+          isExpressionResolve = true;
+        } else {
+          // If a self-comparison such as imei=imei comes in the filter condition
+          // then we need to skip processing of the right expression.
+          // This flow has reached here assuming that this is a single
+          // column expression.
+          // We need to check in depth whether the other expression
+          // contains a column expression or not.
+          if (FilterUtil.checkIfExpressionContainsColumn(leftExp)) {
+            isExpressionResolve = true;
+          } else {
+
+            dimColResolvedFilterInfo.populateFilterInfoBasedOnColumnType(
+                FilterInfoTypeVisitorFactory.getResolvedFilterInfoVisitor(columnExpression),
+                metadata);
+
+          }
+        }
+      } else {
+        isExpressionResolve = true;
+      }
+    }
+    if (isExpressionResolve && exp instanceof ConditionalExpression) {
+      ConditionalExpression conditionalExpression = (ConditionalExpression) exp;
+      List<ColumnExpression> columnList = conditionalExpression.getColumnList();
+      metadata.setColumnExpression(columnList.get(0));
+      metadata.setExpression(exp);
+      metadata.setIncludeFilter(isIncludeFilter);
+      if (!columnList.get(0).getDimension().hasEncoding(Encoding.DICTIONARY)) {
+        dimColResolvedFilterInfo.populateFilterInfoBasedOnColumnType(
+            FilterInfoTypeVisitorFactory.getResolvedFilterInfoVisitor(columnList.get(0)), metadata);
+
+      } else if (columnList.get(0).getDimension().hasEncoding(Encoding.DICTIONARY) && !(
+          columnList.get(0).getDimension().getDataType()
+              == org.apache.carbondata.core.carbon.metadata.datatype.DataType.STRUCT
+              || columnList.get(0).getDimension().getDataType()
+              == org.apache.carbondata.core.carbon.metadata.datatype.DataType.ARRAY)) {
+        dimColResolvedFilterInfo.setFilterValues(FilterUtil
+            .getFilterListForAllValues(absoluteTableIdentifier, exp, columnList.get(0),
+                isIncludeFilter));
+
+        dimColResolvedFilterInfo.setColumnIndex(columnList.get(0).getDimension().getOrdinal());
+        dimColResolvedFilterInfo.setDimension(columnList.get(0).getDimension());
+      }
+    }
+
+  }
+
+  /**
+   * Left node will not be present in this scenario
+   *
+   * @return left node of type FilterResolverIntf instance
+   */
+  public FilterResolverIntf getLeft() {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
+  /**
+   * Right node will not be present in this scenario
+   *
+   * @return right node of type FilterResolverIntf instance
+   */
+  @Override public FilterResolverIntf getRight() {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
+  /**
+   * Method will return the DimColumnResolvedFilterInfo instance which contains
+   * the mapping of the respective dimension and its surrogates involved in
+   * the filter expression.
+   *
+   * @return DimColumnResolvedFilterInfo
+   */
+  public DimColumnResolvedFilterInfo getDimColResolvedFilterInfo() {
+    return dimColResolvedFilterInfo;
+  }
+
+  /**
+   * Method will calculate the start key based on the filter surrogates
+   */
+  public void getStartKey(long[] startKey,
+      SortedMap<Integer, byte[]> setOfStartKeyByteArray, List<long[]> startKeyList)
+      throws QueryExecutionException {
+    if (null == dimColResolvedFilterInfo.getStarIndexKey()) {
+      FilterUtil.getStartKey(dimColResolvedFilterInfo.getDimensionResolvedFilterInstance(),
+          startKey, startKeyList);
+      FilterUtil.getStartKeyForNoDictionaryDimension(dimColResolvedFilterInfo,
+          setOfStartKeyByteArray);
+    }
+  }
+
+  /**
+   * Method will calculate the end key based on the filter surrogates
+   *
+   * @throws QueryExecutionException
+   */
+  @Override public void getEndKey(SegmentProperties segmentProperties,
+      AbsoluteTableIdentifier absoluteTableIdentifier, long[] endKeys,
+      SortedMap<Integer, byte[]> setOfEndKeyByteArray, List<long[]> endKeyList)
+      throws QueryExecutionException {
+    if (null == dimColResolvedFilterInfo.getEndIndexKey()) {
+      FilterUtil.getEndKey(dimColResolvedFilterInfo.getDimensionResolvedFilterInstance(),
+          absoluteTableIdentifier, endKeys, segmentProperties, endKeyList);
+      FilterUtil.getEndKeyForNoDictionaryDimension(dimColResolvedFilterInfo,
+          setOfEndKeyByteArray);
+    }
+  }
+
+  /**
+   * Method will return the executer type for the particular conditional resolver;
+   * basically two types of executers will be formed for a conditional query.
+   *
+   * @return the filter executer type
+   */
+  @Override public FilterExecuterType getFilterExecuterType() {
+    switch (exp.getFilterExpressionType()) {
+      case NOT_EQUALS:
+      case NOT_IN:
+        return FilterExecuterType.EXCLUDE;
+
+      default:
+        return FilterExecuterType.INCLUDE;
+    }
+
+  }
+
+  @Override public Expression getFilterExpression() {
+    // TODO Auto-generated method stub
+    return exp;
+  }
+
+}
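
The lifecycle of a conditional node is: construct, resolve against the table, then ask which executer to build. A short sketch (it assumes a ready Expression and table identifier; the wrapper class is illustrative):

    import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
    import org.apache.carbondata.scan.expression.Expression;
    import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
    import org.apache.carbondata.scan.filter.intf.FilterExecuterType;
    import org.apache.carbondata.scan.filter.resolver.ConditionalFilterResolverImpl;

    final class ConditionalResolveSketch {
      static FilterExecuterType resolveAndClassify(Expression exp,
          AbsoluteTableIdentifier identifier, boolean isIncludeFilter)
          throws FilterUnsupportedException {
        ConditionalFilterResolverImpl resolver =
            new ConditionalFilterResolverImpl(exp, false, isIncludeFilter);
        resolver.resolve(identifier);            // populates the dictionary filter info
        return resolver.getFilterExecuterType(); // INCLUDE, or EXCLUDE for NOT_EQUALS/NOT_IN
      }
    }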

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/FilterResolverIntf.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/FilterResolverIntf.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/FilterResolverIntf.java
new file mode 100644
index 0000000..cd108fb
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/FilterResolverIntf.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.resolver;
+
+import java.io.Serializable;
+import java.util.List;
+import java.util.SortedMap;
+
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+
+public interface FilterResolverIntf extends Serializable {
+
+  /**
+   * This API will resolve the filter expression and generate the
+   * dictionaries for executing/evaluating the filter expressions in the
+   * executer layer.
+   *
+   * @throws FilterUnsupportedException
+   */
+  void resolve(AbsoluteTableIdentifier absoluteTableIdentifier) throws FilterUnsupportedException;
+
+  /**
+   * This API will provide the left column filter expression
+   * in order to resolve the left expression filter.
+   *
+   * @return FilterResolverIntf
+   */
+  FilterResolverIntf getLeft();
+
+  /**
+   * API will provide the right column filter expression in order to resolve
+   * the right expression filter.
+   *
+   * @return FilterResolverIntf
+   */
+  FilterResolverIntf getRight();
+
+  /**
+   * API will return the resolved filter instance, this instance will provide
+   * the resolved surrogates based on the applied filter
+   *
+   * @return DimColumnResolvedFilterInfo object
+   */
+  DimColumnResolvedFilterInfo getDimColResolvedFilterInfo();
+
+  /**
+   * API will get the start key based on the applied filter and the key generator
+   *
+   * @param startKey
+   * @param setOfStartKeyByteArray
+   * @param startKeyList
+   */
+  void getStartKey(long[] startKey, SortedMap<Integer, byte[]> setOfStartKeyByteArray,
+      List<long[]> startKeyList) throws QueryExecutionException;
+
+  /**
+   * API will read the end key based on the max surrogate of the
+   * particular dimension column
+   *
+   * @param endKeys
+   * @param setOfEndKeyByteArray
+   * @param endKeyList
+   * @throws QueryExecutionException
+   */
+  void getEndKey(SegmentProperties segmentProperties, AbsoluteTableIdentifier tableIdentifier,
+      long[] endKeys, SortedMap<Integer, byte[]> setOfEndKeyByteArray, List<long[]> endKeyList)
+      throws QueryExecutionException;
+
+  /**
+   * API will return the filter executer type which will be used to evaluate
+   * the resolved filter during query execution
+   *
+   * @return FilterExecuterType.
+   */
+  FilterExecuterType getFilterExecuterType();
+
+  Expression getFilterExpression();
+
+}
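
Since every resolver exposes getLeft/getRight, consumers can walk the resolved tree generically: leaf resolvers return null children, logical nodes return both. A sketch of a pre-order traversal collecting executer types (illustrative only):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.carbondata.scan.filter.intf.FilterExecuterType;
    import org.apache.carbondata.scan.filter.resolver.FilterResolverIntf;

    final class ResolverTreeWalkSketch {
      static List<FilterExecuterType> executerTypes(FilterResolverIntf root) {
        List<FilterExecuterType> types = new ArrayList<FilterExecuterType>();
        collect(root, types);
        return types;
      }

      // Pre-order walk terminating at null children.
      private static void collect(FilterResolverIntf node, List<FilterExecuterType> out) {
        if (node == null) {
          return;
        }
        out.add(node.getFilterExecuterType());
        collect(node.getLeft(), out);
        collect(node.getRight(), out);
      }
    }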

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/LogicalFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/LogicalFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/LogicalFilterResolverImpl.java
new file mode 100644
index 0000000..3ab3e25
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/LogicalFilterResolverImpl.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.filter.resolver;
+
+import java.util.List;
+import java.util.SortedMap;
+
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.expression.BinaryExpression;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+
+public class LogicalFilterResolverImpl implements FilterResolverIntf {
+  /**
+   *
+   */
+  private static final long serialVersionUID = 5734382980564402914L;
+
+  protected FilterResolverIntf leftEvalutor;
+
+  protected FilterResolverIntf rightEvalutor;
+
+  protected ExpressionType filterExpressionType;
+
+  private BinaryExpression filterExpression;
+
+  public LogicalFilterResolverImpl(FilterResolverIntf leftEvalutor,
+      FilterResolverIntf rightEvalutor, BinaryExpression currentExpression) {
+    this.leftEvalutor = leftEvalutor;
+    this.rightEvalutor = rightEvalutor;
+    this.filterExpressionType = currentExpression.getFilterExpressionType();
+    this.filterExpression = currentExpression;
+  }
+
+  /**
+   * Logical filter resolver will return the left and right filter expression
+   * nodes for filter evaluation, so in this instance no implementation is required.
+   *
+   * @param absoluteTableIdentifier
+   */
+  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier) {
+
+  }
+
+  /**
+   * Since it is a binary condition expression, the getLeft method will get
+   * the left node of the filter expression
+   *
+   * @return FilterResolverIntf.
+   */
+  public FilterResolverIntf getLeft() {
+    return leftEvalutor;
+  }
+
+  /**
+   * Since it is a binary condition expression, the getRight method will get
+   * the right node of the filter expression
+   *
+   * @return FilterResolverIntf.
+   */
+  public FilterResolverIntf getRight() {
+    return rightEvalutor;
+  }
+
+  @Override public DimColumnResolvedFilterInfo getDimColResolvedFilterInfo() {
+    return null;
+  }
+
+  @Override
+  public void getStartKey(long[] startKey, SortedMap<Integer, byte[]> setOfStartKeyByteArray,
+      List<long[]> startKeyList) throws QueryExecutionException {
+
+  }
+
+  @Override public void getEndKey(SegmentProperties segmentProperties,
+      AbsoluteTableIdentifier tableIdentifier, long[] endKeys,
+      SortedMap<Integer, byte[]> setOfEndKeyByteArray, List<long[]> endKeyList)
+      throws QueryExecutionException {
+
+  }
+
+  @Override public FilterExecuterType getFilterExecuterType() {
+    switch (filterExpressionType) {
+      case OR:
+        return FilterExecuterType.OR;
+      case AND:
+        return FilterExecuterType.AND;
+
+      default:
+        return null;
+    }
+  }
+
+  @Override public Expression getFilterExpression() {
+    return filterExpression;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/RestructureFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/RestructureFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/RestructureFilterResolverImpl.java
new file mode 100644
index 0000000..a674698
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/RestructureFilterResolverImpl.java
@@ -0,0 +1,206 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.filter.resolver;
+
+import java.util.List;
+import java.util.SortedMap;
+
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.ColumnExpression;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.conditional.BinaryConditionalExpression;
+import org.apache.carbondata.scan.expression.conditional.ConditionalExpression;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.FilterUtil;
+import org.apache.carbondata.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+
+public class RestructureFilterResolverImpl implements FilterResolverIntf {
+  /**
+   *
+   */
+  private static final long serialVersionUID = -5399656036192814524L;
+
+  protected DimColumnResolvedFilterInfo dimColumnResolvedFilterInfo;
+
+  private Expression exp;
+
+  private String defaultValue;
+
+  private int surrogate;
+
+  private boolean isExpressionResolve;
+
+  private boolean isIncludeFilter;
+
+  public RestructureFilterResolverImpl(Expression exp, String defaultValue, int surrogate,
+      boolean isExpressionResolve, boolean isIncludeFilter) {
+    dimColumnResolvedFilterInfo = new DimColumnResolvedFilterInfo();
+    this.exp = exp;
+    this.defaultValue = defaultValue;
+    this.surrogate = surrogate;
+    this.isExpressionResolve = isExpressionResolve;
+    this.isIncludeFilter = isIncludeFilter;
+  }
+
+  /**
+   * Method will resolve the filters and replace the newly added dimension
+   * with its default value
+   *
+   * @param absoluteTableIdentifier
+   * @throws FilterUnsupportedException
+   */
+  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier)
+      throws FilterUnsupportedException {
+
+    DimColumnResolvedFilterInfo dimColumnResolvedFilterInfo = new DimColumnResolvedFilterInfo();
+    if (!this.isExpressionResolve && exp instanceof BinaryConditionalExpression) {
+      BinaryConditionalExpression binaryConditionalExpression = (BinaryConditionalExpression) exp;
+      Expression left = binaryConditionalExpression.getLeft();
+      Expression right = binaryConditionalExpression.getRight();
+      if (left instanceof ColumnExpression) {
+        ColumnExpression columnExpression = (ColumnExpression) left;
+        if (columnExpression.getDataType().equals(DataType.TIMESTAMP)) {
+          isExpressionResolve = true;
+        } else {
+          // If a self-comparison such as imei=imei comes in the filter condition
+          // then we need to skip processing of the right expression.
+          // This flow has reached here assuming that this is a single
+          // column expression.
+          // We need to check in depth whether the other expression
+          // contains a column expression or not.
+          if (FilterUtil.checkIfExpressionContainsColumn(right)) {
+            isExpressionResolve = true;
+          } else {
+            dimColumnResolvedFilterInfo
+                .setColumnIndex(columnExpression.getCarbonColumn().getOrdinal());
+            dimColumnResolvedFilterInfo.setFilterValues(
+                FilterUtil.getFilterListForRS(right, columnExpression, defaultValue, surrogate));
+          }
+        }
+      } else if (right instanceof ColumnExpression) {
+        ColumnExpression columnExpression = (ColumnExpression) right;
+        if (columnExpression.getDataType().equals(DataType.TIMESTAMP)) {
+          isExpressionResolve = true;
+        } else {
+
+          // If a self-comparison such as imei=imei comes in the filter condition
+          // then we need to skip processing of the right expression.
+          // This flow has reached here assuming that this is a single
+          // column expression.
+          // We need to check in depth whether the other expression
+          // contains a column expression or not.
+          if (checkIfExpressionContainsColumn(left)) {
+            isExpressionResolve = true;
+          } else {
+            dimColumnResolvedFilterInfo
+                .setColumnIndex(columnExpression.getCarbonColumn().getOrdinal());
+            dimColumnResolvedFilterInfo.setFilterValues(
+                FilterUtil.getFilterListForRS(left, columnExpression, defaultValue, surrogate));
+          }
+        }
+      }
+    }
+    if (this.isExpressionResolve && exp instanceof ConditionalExpression) {
+      ConditionalExpression conditionalExpression = (ConditionalExpression) exp;
+      List<ColumnExpression> columnList = conditionalExpression.getColumnList();
+      dimColumnResolvedFilterInfo.setColumnIndex(columnList.get(0).getDimension().getOrdinal());
+      dimColumnResolvedFilterInfo.setFilterValues(FilterUtil
+          .getFilterListForAllMembersRS(exp, columnList.get(0), defaultValue, surrogate,
+              isIncludeFilter));
+    }
+
+  }
+
+  /**
+   * This method will check if a given expression contains a column expression recursively.
+   *
+   * @return boolean
+   */
+  private boolean checkIfExpressionContainsColumn(Expression expression) {
+    if (expression instanceof ColumnExpression) {
+      return true;
+    }
+    for (Expression child : expression.getChildren()) {
+      if (checkIfExpressionContainsColumn(child)) {
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+  @Override public FilterResolverIntf getLeft() {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
+  @Override public FilterResolverIntf getRight() {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
+  /**
+   * Method will return the DimColumnResolvedFilterInfo instance which contains
+   * the mapping of the respective dimension and its surrogates involved in
+   * the filter expression.
+   *
+   * @return DimColumnResolvedFilterInfo
+   */
+  public DimColumnResolvedFilterInfo getDimColResolvedFilterInfo() {
+    return dimColumnResolvedFilterInfo;
+  }
+
+  /**
+   * For restructure resolver no implementation is required for getting
+   * the start key since it already has default values
+   */
+  @Override public void getStartKey(long[] startKeys,
+      SortedMap<Integer, byte[]> noDicStartKeys, List<long[]> startKeyList) {
+
+  }
+
+  /**
+   * For restructure resolver no implementation is required for getting
+   * the end key since it already has default values
+   */
+  @Override public void getEndKey(SegmentProperties segmentProperties,
+      AbsoluteTableIdentifier tableIdentifier, long[] endKeys,
+      SortedMap<Integer, byte[]> noDicEndKeys, List<long[]> endKeyList) {
+  }
+
+  /**
+   * Method will get the executer type in order to create the filter executer tree
+   *
+   * @return FilterExecuterType
+   */
+  @Override public FilterExecuterType getFilterExecuterType() {
+    return FilterExecuterType.RESTRUCTURE;
+  }
+
+  @Override public Expression getFilterExpression() {
+    // TODO Auto-generated method stub
+    return exp;
+  }
+}
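
The private column-containment check above is a plain recursive descent over the expression tree. The same pattern extracted standalone (Expression.getChildren() is the only API assumed, exactly as used in the method above):

    import org.apache.carbondata.scan.expression.ColumnExpression;
    import org.apache.carbondata.scan.expression.Expression;

    final class ColumnContainmentSketch {
      // True if any node in the expression tree is a column reference.
      static boolean containsColumn(Expression expression) {
        if (expression instanceof ColumnExpression) {
          return true;
        }
        for (Expression child : expression.getChildren()) {
          if (containsColumn(child)) {
            return true;
          }
        }
        return false;
      }
    }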

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/RowLevelFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/RowLevelFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/RowLevelFilterResolverImpl.java
new file mode 100644
index 0000000..466d048
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/RowLevelFilterResolverImpl.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.filter.resolver;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.scan.expression.ColumnExpression;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.conditional.ConditionalExpression;
+import org.apache.carbondata.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
+
+public class RowLevelFilterResolverImpl extends ConditionalFilterResolverImpl {
+
+  private static final long serialVersionUID = 176122729713729929L;
+  protected boolean isExpressionResolve;
+  protected boolean isIncludeFilter;
+
+  private List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList;
+  private List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList;
+  private AbsoluteTableIdentifier tableIdentifier;
+
+  public RowLevelFilterResolverImpl(Expression exp, boolean isExpressionResolve,
+      boolean isIncludeFilter, AbsoluteTableIdentifier tableIdentifier) {
+    super(exp, isExpressionResolve, isIncludeFilter);
+    dimColEvaluatorInfoList =
+        new ArrayList<DimColumnResolvedFilterInfo>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    msrColEvalutorInfoList = new ArrayList<MeasureColumnResolvedFilterInfo>(
+        CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    this.tableIdentifier = tableIdentifier;
+  }
+
+  /**
+   * Method which will resolve the filter expression by converting the filter member
+   * to its assigned dictionary values.
+   */
+  public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier) {
+    DimColumnResolvedFilterInfo dimColumnEvaluatorInfo = null;
+    MeasureColumnResolvedFilterInfo msrColumnEvalutorInfo = null;
+    int index = 0;
+    if (exp instanceof ConditionalExpression) {
+      ConditionalExpression conditionalExpression = (ConditionalExpression) exp;
+      List<ColumnExpression> columnList = conditionalExpression.getColumnList();
+      for (ColumnExpression columnExpression : columnList) {
+        if (columnExpression.isDimension()) {
+          dimColumnEvaluatorInfo = new DimColumnResolvedFilterInfo();
+          dimColumnEvaluatorInfo.setColumnIndex(columnExpression.getCarbonColumn().getOrdinal());
+          dimColumnEvaluatorInfo.setRowIndex(index++);
+          dimColumnEvaluatorInfo.setDimension(columnExpression.getDimension());
+          dimColumnEvaluatorInfo.setDimensionExistsInCurrentSilce(false);
+          dimColEvaluatorInfoList.add(dimColumnEvaluatorInfo);
+        } else {
+          msrColumnEvalutorInfo = new MeasureColumnResolvedFilterInfo();
+          msrColumnEvalutorInfo.setRowIndex(index++);
+          msrColumnEvalutorInfo.setAggregator(
+              ((CarbonMeasure) columnExpression.getCarbonColumn()).getAggregateFunction());
+          msrColumnEvalutorInfo
+              .setColumnIndex(((CarbonMeasure) columnExpression.getCarbonColumn()).getOrdinal());
+          msrColumnEvalutorInfo.setType(columnExpression.getCarbonColumn().getDataType());
+          msrColEvalutorInfoList.add(msrColumnEvalutorInfo);
+        }
+      }
+    }
+  }
+
+  /**
+   * This method will provide the executer type to the callee in order to identify
+   * the executer type for the filter resolution. Row level filter executer is a
+   * special executer since it gets all the rows of the specified filter dimension,
+   * which are then sent to Spark for processing
+   */
+  @Override public FilterExecuterType getFilterExecuterType() {
+    return FilterExecuterType.ROWLEVEL;
+  }
+
+  /**
+   * Method will read the filter expression corresponding to the resolver.
+   * This method is required by the row level executer in order to evaluate the
+   * filter expression against Spark; as mentioned above, row level is a special
+   * type of filter resolver.
+   *
+   * @return Expression
+   */
+  public Expression getFilterExpresion() {
+    return exp;
+  }
+
+  /**
+   * Method will return the DimColumnResolvedFilterInfo instance which contains
+   * the mapping of the respective dimension and its surrogates involved in
+   * the filter expression.
+   *
+   * @return DimColumnResolvedFilterInfo
+   */
+  public List<DimColumnResolvedFilterInfo> getDimColEvaluatorInfoList() {
+    return dimColEvaluatorInfoList;
+  }
+
+  /**
+   * Method will return the list of MeasureColumnResolvedFilterInfo instances
+   * which contain the measure level details.
+   *
+   * @return MeasureColumnResolvedFilterInfo
+   */
+  public List<MeasureColumnResolvedFilterInfo> getMsrColEvalutorInfoList() {
+    return msrColEvalutorInfoList;
+  }
+
+  /**
+   * Method will return table information which will be required for retrieving
+   * the dictionary cache in order to read all the members of the respective dimension.
+   *
+   * @return AbsoluteTableIdentifier
+   */
+  public AbsoluteTableIdentifier getTableIdentifier() {
+    return tableIdentifier;
+  }
+
+}
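
After resolve() runs, the row-level node holds one evaluator info entry per referenced column, split across the dimension and measure lists exposed above. A small consuming sketch (illustrative only):

    import org.apache.carbondata.scan.filter.resolver.RowLevelFilterResolverImpl;

    final class RowLevelConsumeSketch {
      // One evaluator info object is created per column during resolve().
      static int evaluatorCount(RowLevelFilterResolverImpl resolver) {
        return resolver.getDimColEvaluatorInfoList().size()
            + resolver.getMsrColEvalutorInfoList().size();
      }
    }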

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
new file mode 100644
index 0000000..0f9b47d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.filter.resolver;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.SortedMap;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
+import org.apache.carbondata.core.util.ByteUtil;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.expression.ColumnExpression;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.conditional.BinaryConditionalExpression;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.expression.logical.BinaryLogicalExpression;
+import org.apache.carbondata.scan.filter.DimColumnFilterInfo;
+import org.apache.carbondata.scan.filter.FilterUtil;
+import org.apache.carbondata.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
+
+public class RowLevelRangeFilterResolverImpl extends ConditionalFilterResolverImpl {
+
+  /**
+   *
+   */
+  private static final long serialVersionUID = 6629319265336666789L;
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(RowLevelRangeFilterResolverImpl.class.getName());
+  private List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList;
+  private List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList;
+  private AbsoluteTableIdentifier tableIdentifier;
+
+  public RowLevelRangeFilterResolverImpl(Expression exp, boolean isExpressionResolve,
+      boolean isIncludeFilter, AbsoluteTableIdentifier tableIdentifier) {
+    super(exp, isExpressionResolve, isIncludeFilter);
+    dimColEvaluatorInfoList =
+        new ArrayList<DimColumnResolvedFilterInfo>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    msrColEvalutorInfoList = new ArrayList<MeasureColumnResolvedFilterInfo>(
+        CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    this.tableIdentifier = tableIdentifier;
+  }
+
+  /**
+   * This method will return the filter values which are present in the range-level
+   * conditional expressions.
+   *
+   * @return the filter range values as byte arrays
+   */
+  public byte[][] getFilterRangeValues(SegmentProperties segmentProperties) {
+
+    if (null != dimColEvaluatorInfoList.get(0).getFilterValues() && !dimColEvaluatorInfoList.get(0)
+        .getDimension().hasEncoding(Encoding.DICTIONARY)) {
+      List<byte[]> noDictFilterValuesList =
+          dimColEvaluatorInfoList.get(0).getFilterValues().getNoDictionaryFilterValuesList();
+      return noDictFilterValuesList.toArray((new byte[noDictFilterValuesList.size()][]));
+    } else if (null != dimColEvaluatorInfoList.get(0).getFilterValues() && dimColEvaluatorInfoList
+        .get(0).getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+      return FilterUtil.getKeyArray(this.dimColEvaluatorInfoList.get(0).getFilterValues(),
+          this.dimColEvaluatorInfoList.get(0).getDimension(),
+          segmentProperties.getDimensionKeyGenerator());
+    }
+    return null;
+
+  }
+
+  /**
+   * Method will calculate the start key based on the filter surrogates
+   */
+  public void getStartKey(long[] startKey,
+      SortedMap<Integer, byte[]> noDictStartKeys, List<long[]> startKeyList) {
+    if (null == dimColEvaluatorInfoList.get(0).getStarIndexKey()) {
+      try {
+        FilterUtil.getStartKey(dimColEvaluatorInfoList.get(0).getDimensionResolvedFilterInstance(),
+            startKey, startKeyList);
+        FilterUtil
+            .getStartKeyForNoDictionaryDimension(dimColEvaluatorInfoList.get(0), noDictStartKeys);
+      } catch (QueryExecutionException e) {
+        LOGGER.error("Can not get the start key during block prune");
+      }
+    }
+  }
+
+  /**
+   * Method will calculate the end key based on the filter surrogates
+   */
+  @Override public void getEndKey(SegmentProperties segmentProperties,
+      AbsoluteTableIdentifier absoluteTableIdentifier, long[] endKeys,
+      SortedMap<Integer, byte[]> noDicEndKeys, List<long[]> endKeyList) {
+    if (null == dimColEvaluatorInfoList.get(0).getEndIndexKey()) {
+      try {
+        FilterUtil.getEndKey(dimColEvaluatorInfoList.get(0).getDimensionResolvedFilterInstance(),
+            absoluteTableIdentifier, endKeys, segmentProperties, endKeyList);
+        FilterUtil
+            .getEndKeyForNoDictionaryDimension(dimColEvaluatorInfoList.get(0), noDicEndKeys);
+      } catch (QueryExecutionException e) {
+        LOGGER.error("Cannot get the end key during block prune");
+      }
+    }
+  }
+
+  private List<byte[]> getNoDictionaryRangeValues() {
+    List<ExpressionResult> listOfExpressionResults = new ArrayList<ExpressionResult>(20);
+    if (this.getFilterExpression() instanceof BinaryConditionalExpression) {
+      listOfExpressionResults =
+          ((BinaryConditionalExpression) this.getFilterExpression()).getLiterals();
+    }
+    List<byte[]> filterValuesList = new ArrayList<byte[]>(20);
+    boolean invalidRowsPresent = false;
+    for (ExpressionResult result : listOfExpressionResults) {
+      try {
+        if (result.getString() == null) {
+          filterValuesList.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL.getBytes());
+          continue;
+        }
+        filterValuesList.add(result.getString().getBytes());
+      } catch (FilterIllegalMemberException e) {
+        // Any invalid member encountered during evaluation shall be ignored; the
+        // system will log the error only once, since evaluation happens for all
+        // rows, in order to avoid flooding the log with too much information.
+        FilterUtil.logError(e, invalidRowsPresent);
+      }
+    }
+    Comparator<byte[]> filterNoDictValueComparator = new Comparator<byte[]>() {
+      @Override public int compare(byte[] filterMember1, byte[] filterMember2) {
+        return ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterMember1, filterMember2);
+      }
+
+    };
+    Collections.sort(filterValuesList, filterNoDictValueComparator);
+    return filterValuesList;
+  }
+
+  /**
+   * Method which will resolve the filter expression by converting the filter
+   * member to its assigned dictionary values.
+   */
+  public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier)
+      throws FilterUnsupportedException {
+    DimColumnResolvedFilterInfo dimColumnEvaluatorInfo = null;
+    MeasureColumnResolvedFilterInfo msrColumnEvalutorInfo = null;
+    int index = 0;
+    if (exp instanceof BinaryLogicalExpression) {
+      BinaryLogicalExpression conditionalExpression = (BinaryLogicalExpression) exp;
+      List<ColumnExpression> columnList = conditionalExpression.getColumnList();
+      for (ColumnExpression columnExpression : columnList) {
+        if (columnExpression.isDimension()) {
+          dimColumnEvaluatorInfo = new DimColumnResolvedFilterInfo();
+          DimColumnFilterInfo filterInfo = new DimColumnFilterInfo();
+          dimColumnEvaluatorInfo.setColumnIndex(columnExpression.getCarbonColumn().getOrdinal());
+          dimColumnEvaluatorInfo.setRowIndex(index++);
+          dimColumnEvaluatorInfo.setDimension(columnExpression.getDimension());
+          dimColumnEvaluatorInfo.setDimensionExistsInCurrentSilce(false);
+          if (columnExpression.getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+            filterInfo.setFilterList(getDirectSurrogateValues(columnExpression));
+          } else {
+            filterInfo.setFilterListForNoDictionaryCols(getNoDictionaryRangeValues());
+          }
+          filterInfo.setIncludeFilter(isIncludeFilter);
+          dimColumnEvaluatorInfo.setFilterValues(filterInfo);
+          dimColumnEvaluatorInfo
+              .addDimensionResolvedFilterInstance(columnExpression.getDimension(), filterInfo);
+          dimColEvaluatorInfoList.add(dimColumnEvaluatorInfo);
+        } else {
+          msrColumnEvalutorInfo = new MeasureColumnResolvedFilterInfo();
+          msrColumnEvalutorInfo.setRowIndex(index++);
+          msrColumnEvalutorInfo.setAggregator(
+              ((CarbonMeasure) columnExpression.getCarbonColumn()).getAggregateFunction());
+          msrColumnEvalutorInfo
+              .setColumnIndex(((CarbonMeasure) columnExpression.getCarbonColumn()).getOrdinal());
+          msrColumnEvalutorInfo.setType(columnExpression.getCarbonColumn().getDataType());
+          msrColEvalutorInfoList.add(msrColumnEvalutorInfo);
+        }
+      }
+    }
+  }
+
+  private List<Integer> getDirectSurrogateValues(ColumnExpression columnExpression)
+      throws FilterUnsupportedException {
+    List<ExpressionResult> listOfExpressionResults = new ArrayList<ExpressionResult>(20);
+    DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
+        .getDirectDictionaryGenerator(columnExpression.getDimension().getDataType());
+
+    if (this.getFilterExpression() instanceof BinaryConditionalExpression) {
+      listOfExpressionResults =
+          ((BinaryConditionalExpression) this.getFilterExpression()).getLiterals();
+    }
+    List<Integer> filterValuesList = new ArrayList<Integer>(20);
+    try {
+      // If any filter member provided by the user is invalid, throw an error;
+      // otherwise the system could display inconsistent results.
+      for (ExpressionResult result : listOfExpressionResults) {
+        filterValuesList.add(directDictionaryGenerator
+            .generateDirectSurrogateKey(result.getString(),
+                CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
+      }
+    } catch (FilterIllegalMemberException e) {
+      throw new FilterUnsupportedException(e);
+    }
+    return filterValuesList;
+  }
+
+  /**
+   * Method will return the DimColumnResolvedFilterInfo instances which hold the
+   * mapping of the respective dimensions and the surrogates involved in the
+   * filter expression.
+   *
+   * @return list of DimColumnResolvedFilterInfo
+   */
+  public List<DimColumnResolvedFilterInfo> getDimColEvaluatorInfoList() {
+    return dimColEvaluatorInfoList;
+  }
+
+  /**
+   * Method will return the MeasureColumnResolvedFilterInfo instances which
+   * contain measure-level details.
+   *
+   * @return list of MeasureColumnResolvedFilterInfo
+   */
+  public List<MeasureColumnResolvedFilterInfo> getMsrColEvalutorInfoList() {
+    return msrColEvalutorInfoList;
+  }
+
+  public AbsoluteTableIdentifier getTableIdentifier() {
+    return tableIdentifier;
+  }
+
+  public Expression getFilterExpression() {
+    return this.exp;
+  }
+
+  /**
+   * This method will provide the executer type to the callee so that the executer
+   * type for the filter resolution can be identified. The row-level filter executer
+   * is special: it fetches all the rows of the specified filter dimension, which
+   * are then sent to Spark for processing.
+   */
+  public FilterExecuterType getFilterExecuterType() {
+    switch (exp.getFilterExpressionType()) {
+      case GREATERTHAN:
+        return FilterExecuterType.ROWLEVEL_GREATERTHAN;
+      case GREATERTHAN_EQUALTO:
+        return FilterExecuterType.ROWLEVEL_GREATERTHAN_EQUALTO;
+      case LESSTHAN:
+        return FilterExecuterType.ROWLEVEL_LESSTHAN;
+      case LESSTHAN_EQUALTO:
+        return FilterExecuterType.ROWLEVEL_LESSTHAN_EQUALTO;
+
+      default:
+        return FilterExecuterType.ROWLEVEL;
+    }
+  }
+}
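
The getNoDictionaryRangeValues method above collects the range literals as raw bytes and keeps them sorted with an unsigned byte comparator. The following self-contained sketch isolates just that sort step using plain JDK types; the comparator approximates the unsigned lexicographic order assumed of ByteUtil.UnsafeComparer.

    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    public class NoDictSortSketch {
      public static void main(String[] args) {
        List<byte[]> filterValuesList = new ArrayList<byte[]>();
        for (String literal : new String[] { "banana", "apple", "cherry" }) {
          filterValuesList.add(literal.getBytes(StandardCharsets.UTF_8));
        }
        // Unsigned lexicographic comparison over the raw bytes.
        Collections.sort(filterValuesList, new Comparator<byte[]>() {
          @Override public int compare(byte[] a, byte[] b) {
            int len = Math.min(a.length, b.length);
            for (int i = 0; i < len; i++) {
              int diff = (a[i] & 0xFF) - (b[i] & 0xFF);
              if (diff != 0) {
                return diff;
              }
            }
            return a.length - b.length;
          }
        });
        for (byte[] value : filterValuesList) {
          System.out.println(new String(value, StandardCharsets.UTF_8)); // apple, banana, cherry
        }
      }
    }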

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/metadata/FilterResolverMetadata.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/metadata/FilterResolverMetadata.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/metadata/FilterResolverMetadata.java
new file mode 100644
index 0000000..cabba34
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/metadata/FilterResolverMetadata.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.resolver.metadata;
+
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.scan.expression.ColumnExpression;
+import org.apache.carbondata.scan.expression.Expression;
+
+public class FilterResolverMetadata {
+  private AbsoluteTableIdentifier tableIdentifier;
+  private Expression expression;
+  private ColumnExpression columnExpression;
+  private boolean isIncludeFilter;
+
+  public AbsoluteTableIdentifier getTableIdentifier() {
+    return tableIdentifier;
+  }
+
+  public void setTableIdentifier(AbsoluteTableIdentifier tableIdentifier) {
+    this.tableIdentifier = tableIdentifier;
+  }
+
+  public Expression getExpression() {
+    return expression;
+  }
+
+  public void setExpression(Expression expression) {
+    this.expression = expression;
+  }
+
+  public ColumnExpression getColumnExpression() {
+    return columnExpression;
+  }
+
+  public void setColumnExpression(ColumnExpression columnExpression) {
+    this.columnExpression = columnExpression;
+  }
+
+  public boolean isIncludeFilter() {
+    return isIncludeFilter;
+  }
+
+  public void setIncludeFilter(boolean isIncludeFilter) {
+    this.isIncludeFilter = isIncludeFilter;
+  }
+}
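
FilterResolverMetadata is a plain carrier object: a resolver fills it once and hands it to a visitor. A hedged usage fragment follows; the tableIdentifier, exp, and columnExpression variables are placeholders assumed to be in scope in the calling resolver.

    FilterResolverMetadata metadata = new FilterResolverMetadata();
    metadata.setTableIdentifier(tableIdentifier);   // identifies the table being queried
    metadata.setExpression(exp);                    // the filter expression being resolved
    metadata.setColumnExpression(columnExpression); // the column the filter applies to
    metadata.setIncludeFilter(true);                // include (IN) rather than exclude (NOT IN) semantics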

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/DimColumnResolvedFilterInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/DimColumnResolvedFilterInfo.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/DimColumnResolvedFilterInfo.java
new file mode 100644
index 0000000..bf61d85
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/DimColumnResolvedFilterInfo.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.filter.resolver.resolverinfo;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.carbondata.core.carbon.datastore.IndexKey;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.DimColumnFilterInfo;
+import org.apache.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.visitable.ResolvedFilterInfoVisitable;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.visitor.ResolvedFilterInfoVisitorIntf;
+
+public class DimColumnResolvedFilterInfo implements Serializable, ResolvedFilterInfoVisitable {
+  /**
+   *
+   */
+  private static final long serialVersionUID = 3428115141211084114L;
+
+  /**
+   * column index in file
+   */
+  private int columnIndex = -1;
+
+  /**
+   * need compressed data from file
+   */
+  private boolean needCompressedData;
+
+  /**
+   * rowIndex
+   */
+  private int rowIndex = -1;
+
+  private boolean isDimensionExistsInCurrentSilce = true;
+
+  private int rsSurrogates;
+
+  private String defaultValue;
+
+  private CarbonDimension dimension;
+
+  /**
+   * start index key of the block based on the keygenerator
+   */
+  private transient IndexKey starIndexKey;
+
+  /**
+   * end index key which is formed considering the max surrogate values
+   * from the dictionary cache
+   */
+  private transient IndexKey endIndexKey;
+
+  /**
+   * resolved filter object of a particular filter Expression.
+   */
+  private DimColumnFilterInfo resolvedFilterValueObj;
+
+  private Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionResolvedFilter;
+
+  public DimColumnResolvedFilterInfo() {
+    dimensionResolvedFilter = new HashMap<CarbonDimension, List<DimColumnFilterInfo>>(20);
+  }
+
+  public IndexKey getStarIndexKey() {
+    return starIndexKey;
+  }
+
+  public void setStarIndexKey(IndexKey starIndexKey) {
+    this.starIndexKey = starIndexKey;
+  }
+
+  public IndexKey getEndIndexKey() {
+    return endIndexKey;
+  }
+
+  public void setEndIndexKey(IndexKey endIndexKey) {
+    this.endIndexKey = endIndexKey;
+  }
+
+  public void addDimensionResolvedFilterInstance(CarbonDimension dimension,
+      DimColumnFilterInfo filterResolvedObj) {
+    List<DimColumnFilterInfo> currentVals = dimensionResolvedFilter.get(dimension);
+    if (null == currentVals) {
+      currentVals = new ArrayList<DimColumnFilterInfo>(20);
+      currentVals.add(filterResolvedObj);
+      dimensionResolvedFilter.put(dimension, currentVals);
+    } else {
+      currentVals.add(filterResolvedObj);
+    }
+  }
+
+  public Map<CarbonDimension, List<DimColumnFilterInfo>> getDimensionResolvedFilterInstance() {
+    return dimensionResolvedFilter;
+  }
+
+  public CarbonDimension getDimension() {
+    return dimension;
+  }
+
+  public void setDimension(CarbonDimension dimension) {
+    this.dimension = dimension;
+  }
+
+  public int getColumnIndex() {
+    return columnIndex;
+  }
+
+  public void setColumnIndex(int columnIndex) {
+    this.columnIndex = columnIndex;
+  }
+
+  public boolean isNeedCompressedData() {
+    return needCompressedData;
+  }
+
+  public void setNeedCompressedData(boolean needCompressedData) {
+    this.needCompressedData = needCompressedData;
+  }
+
+  public DimColumnFilterInfo getFilterValues() {
+    return resolvedFilterValueObj;
+  }
+
+  public void setFilterValues(final DimColumnFilterInfo resolvedFilterValueObj) {
+    this.resolvedFilterValueObj = resolvedFilterValueObj;
+  }
+
+  public int getRowIndex() {
+    return rowIndex;
+  }
+
+  public void setRowIndex(int rowIndex) {
+    this.rowIndex = rowIndex;
+  }
+
+  public boolean isDimensionExistsInCurrentSilce() {
+    return isDimensionExistsInCurrentSilce;
+  }
+
+  public void setDimensionExistsInCurrentSilce(boolean isDimensionExistsInCurrentSilce) {
+    this.isDimensionExistsInCurrentSilce = isDimensionExistsInCurrentSilce;
+  }
+
+  public int getRsSurrogates() {
+    return rsSurrogates;
+  }
+
+  public void setRsSurrogates(int rsSurrogates) {
+    this.rsSurrogates = rsSurrogates;
+  }
+
+  public String getDefaultValue() {
+    return defaultValue;
+  }
+
+  public void setDefaultValue(String defaultValue) {
+    this.defaultValue = defaultValue;
+  }
+
+  @Override public void populateFilterInfoBasedOnColumnType(ResolvedFilterInfoVisitorIntf visitor,
+      FilterResolverMetadata metadata) throws FilterUnsupportedException {
+    if (null != visitor) {
+      visitor.populateFilterResolvedInfo(this, metadata);
+      this.addDimensionResolvedFilterInstance(metadata.getColumnExpression().getDimension(),
+          this.getFilterValues());
+      this.setDimension(metadata.getColumnExpression().getDimension());
+      this.setColumnIndex(metadata.getColumnExpression().getDimension().getOrdinal());
+    }
+
+  }
+}
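
The addDimensionResolvedFilterInstance method above uses the classic get-or-create idiom for a multimap: the first filter for a dimension creates the per-dimension list, and later filters append to it. A standalone illustration of the same pattern with JDK types:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class MultimapSketch {
      private final Map<String, List<String>> resolvedFilters = new HashMap<String, List<String>>();

      void add(String dimension, String filterInfo) {
        List<String> currentVals = resolvedFilters.get(dimension);
        if (currentVals == null) {
          // first filter seen for this dimension: create and register the list
          currentVals = new ArrayList<String>();
          resolvedFilters.put(dimension, currentVals);
        }
        // in both branches the new filter ends up in the per-dimension list
        currentVals.add(filterInfo);
      }

      public static void main(String[] args) {
        MultimapSketch sketch = new MultimapSketch();
        sketch.add("country", "IN (US, UK)");
        sketch.add("country", "NOT IN (CN)");
        System.out.println(sketch.resolvedFilters); // {country=[IN (US, UK), NOT IN (CN)]}
      }
    }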

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/MeasureColumnResolvedFilterInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/MeasureColumnResolvedFilterInfo.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/MeasureColumnResolvedFilterInfo.java
new file mode 100644
index 0000000..8055ae9
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/MeasureColumnResolvedFilterInfo.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.filter.resolver.resolverinfo;
+
+import java.io.Serializable;
+
+public class MeasureColumnResolvedFilterInfo implements Serializable {
+  /**
+   *
+   */
+  private static final long serialVersionUID = 4222568289115151561L;
+
+  private int columnIndex = -1;
+
+  private int rowIndex = -1;
+
+  private Object uniqueValue;
+
+  private String aggregator;
+
+  private boolean isMeasureExistsInCurrentSlice = true;
+
+  private Object defaultValue;
+
+  private org.apache.carbondata.core.carbon.metadata.datatype.DataType type;
+
+  public int getColumnIndex() {
+    return columnIndex;
+  }
+
+  public void setColumnIndex(int columnIndex) {
+    this.columnIndex = columnIndex;
+  }
+
+  public int getRowIndex() {
+    return rowIndex;
+  }
+
+  public void setRowIndex(int rowIndex) {
+    this.rowIndex = rowIndex;
+  }
+
+  public Object getUniqueValue() {
+    return uniqueValue;
+  }
+
+  public void setUniqueValue(Object uniqueValue) {
+    this.uniqueValue = uniqueValue;
+  }
+
+  public org.apache.carbondata.core.carbon.metadata.datatype.DataType getType() {
+    return type;
+  }
+
+  public void setType(org.apache.carbondata.core.carbon.metadata.datatype.DataType dataType) {
+    this.type = dataType;
+  }
+
+  /**
+   * @return Returns the aggregator.
+   */
+  public String getAggregator() {
+    return aggregator;
+  }
+
+  /**
+   * @param aggregator The aggregator to set.
+   */
+  public void setAggregator(String aggregator) {
+    this.aggregator = aggregator;
+  }
+
+  public boolean isMeasureExistsInCurrentSlice() {
+    return isMeasureExistsInCurrentSlice;
+  }
+
+  public void setMeasureExistsInCurrentSlice(boolean isMeasureExistsInCurrentSlice) {
+    this.isMeasureExistsInCurrentSlice = isMeasureExistsInCurrentSlice;
+  }
+
+  public Object getDefaultValue() {
+    return defaultValue;
+  }
+
+  public void setDefaultValue(double defaultValue) {
+    this.defaultValue = defaultValue;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitable/ResolvedFilterInfoVisitable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitable/ResolvedFilterInfoVisitable.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitable/ResolvedFilterInfoVisitable.java
new file mode 100644
index 0000000..bb71b96
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitable/ResolvedFilterInfoVisitable.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.resolver.resolverinfo.visitable;
+
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.visitor.ResolvedFilterInfoVisitorIntf;
+
+public interface ResolvedFilterInfoVisitable {
+  /**
+   * This visitable method will accept the visitor passed as a parameter, and the
+   * visitable filter instance will be resolved based on the concrete visitor type.
+   *
+   * @param visitor
+   * @param metadata
+   * @throws FilterUnsupportedException
+   */
+  void populateFilterInfoBasedOnColumnType(ResolvedFilterInfoVisitorIntf visitor,
+      FilterResolverMetadata metadata) throws FilterUnsupportedException;
+
+}
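
This interface is the visitable half of a visitor pattern: the filter-info object accepts a visitor, and the visitor's concrete type decides how the object is populated. A compilable toy version of that round trip, with simplified names standing in for the CarbonData types:

    public class VisitorSketch {
      interface Visitable { void populate(Visitor visitor); }
      interface Visitor { void populateFilterResolvedInfo(DimInfo info); }

      static class DimInfo implements Visitable {
        String resolvedValues;
        public void populate(Visitor visitor) {
          visitor.populateFilterResolvedInfo(this); // dispatch on the visitor's concrete type
        }
      }

      static class DictionaryVisitor implements Visitor {
        public void populateFilterResolvedInfo(DimInfo info) {
          info.resolvedValues = "surrogates resolved from a dictionary";
        }
      }

      public static void main(String[] args) {
        DimInfo info = new DimInfo();
        info.populate(new DictionaryVisitor());
        System.out.println(info.resolvedValues);
      }
    }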

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/CustomTypeDictionaryVisitor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/CustomTypeDictionaryVisitor.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/CustomTypeDictionaryVisitor.java
new file mode 100644
index 0000000..3b2ec99
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/CustomTypeDictionaryVisitor.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.resolver.resolverinfo.visitor;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
+import org.apache.carbondata.scan.expression.ColumnExpression;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.DimColumnFilterInfo;
+import org.apache.carbondata.scan.filter.FilterUtil;
+import org.apache.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+
+public class CustomTypeDictionaryVisitor implements ResolvedFilterInfoVisitorIntf {
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(CustomTypeDictionaryVisitor.class.getName());
+
+  /**
+   * This Visitor method is used to resolve or populate the filter details using a
+   * custom-type dictionary value: the filter members are resolved through a
+   * custom-type function which generates the dictionary for direct-column-type
+   * filter members.
+   *
+   * @param visitableObj
+   * @param metadata
+   * @throws FilterUnsupportedException if an exception occurs while evaluating
+   * the filter models.
+   */
+  public void populateFilterResolvedInfo(DimColumnResolvedFilterInfo visitableObj,
+      FilterResolverMetadata metadata) throws FilterUnsupportedException {
+    DimColumnFilterInfo resolvedFilterObject = null;
+
+    List<String> evaluateResultListFinal;
+    try {
+      evaluateResultListFinal = metadata.getExpression().evaluate(null).getListAsString();
+    } catch (FilterIllegalMemberException e) {
+      throw new FilterUnsupportedException(e);
+    }
+    boolean isNotTimestampType = FilterUtil.checkIfDataTypeNotTimeStamp(metadata.getExpression());
+    resolvedFilterObject = getDirectDictionaryValKeyMemberForFilter(metadata.getTableIdentifier(),
+        metadata.getColumnExpression(), evaluateResultListFinal, metadata.isIncludeFilter(),
+        isNotTimestampType);
+    if (!metadata.isIncludeFilter() && null != resolvedFilterObject && !resolvedFilterObject
+        .getFilterList().contains(CarbonCommonConstants.MEMBER_DEFAULT_VAL_SURROGATE_KEY)) {
+      // Add the default surrogate key of the null member so that null values
+      // are not displayed in the report, as per Hive compatibility.
+      resolvedFilterObject.getFilterList()
+          .add(CarbonCommonConstants.MEMBER_DEFAULT_VAL_SURROGATE_KEY);
+      Collections.sort(resolvedFilterObject.getFilterList());
+    }
+    visitableObj.setFilterValues(resolvedFilterObject);
+  }
+
+  private DimColumnFilterInfo getDirectDictionaryValKeyMemberForFilter(
+      AbsoluteTableIdentifier tableIdentifier, ColumnExpression columnExpression,
+      List<String> evaluateResultListFinal, boolean isIncludeFilter, boolean isNotTimestampType) {
+    List<Integer> surrogates = new ArrayList<Integer>(20);
+    DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
+        .getDirectDictionaryGenerator(columnExpression.getDimension().getDataType());
+    // Read the dictionary value directly
+    getSurrogateValuesForDictionary(evaluateResultListFinal, surrogates, isNotTimestampType,
+        directDictionaryGenerator);
+
+    Collections.sort(surrogates);
+    DimColumnFilterInfo columnFilterInfo = null;
+    if (surrogates.size() > 0) {
+      columnFilterInfo = new DimColumnFilterInfo();
+      columnFilterInfo.setIncludeFilter(isIncludeFilter);
+      columnFilterInfo.setFilterList(surrogates);
+    }
+    return columnFilterInfo;
+  }
+
+  private void getSurrogateValuesForDictionary(List<String> evaluateResultListFinal,
+      List<Integer> surrogates, boolean isNotTimestampType,
+      DirectDictionaryGenerator directDictionaryGenerator) {
+    String timeFormat = CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT;
+    if (isNotTimestampType) {
+      timeFormat = null;
+    }
+    for (String filterMember : evaluateResultListFinal) {
+      surrogates
+          .add(directDictionaryGenerator.generateDirectSurrogateKey(filterMember, timeFormat));
+    }
+  }
+}
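
The point of the direct-dictionary path above is that the surrogate key can be computed from the member value itself, with no dictionary lookup. A simplified stand-in generator follows; the seconds-since-epoch scheme and the sentinel value are illustrative assumptions, not CarbonData's actual encoding.

    import java.text.ParseException;
    import java.text.SimpleDateFormat;

    public class ToyDirectDictionary {
      // The surrogate is derived from the value itself, so every node can
      // compute it independently and deterministically.
      static int generateDirectSurrogateKey(String member, String timeFormat) {
        try {
          return (int) (new SimpleDateFormat(timeFormat).parse(member).getTime() / 1000L);
        } catch (ParseException e) {
          return 1; // sentinel surrogate for an unparseable (null-equivalent) member
        }
      }

      public static void main(String[] args) {
        System.out.println(generateDirectSurrogateKey("2016-08-15 07:08:46", "yyyy-MM-dd HH:mm:ss"));
      }
    }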

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/DictionaryColumnVisitor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/DictionaryColumnVisitor.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/DictionaryColumnVisitor.java
new file mode 100644
index 0000000..6cb2348
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/DictionaryColumnVisitor.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.resolver.resolverinfo.visitor;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.DimColumnFilterInfo;
+import org.apache.carbondata.scan.filter.FilterUtil;
+import org.apache.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+
+public class DictionaryColumnVisitor implements ResolvedFilterInfoVisitorIntf {
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(DictionaryColumnVisitor.class.getName());
+
+  /**
+   * This Visitor method is used to populate the visitableObj with dictionary filter
+   * details where the filter values are resolved using the dictionary cache.
+   *
+   * @param visitableObj
+   * @param metadata
+   * @throws FilterUnsupportedException if an exception occurs while evaluating
+   * the filter models.
+   */
+  public void populateFilterResolvedInfo(DimColumnResolvedFilterInfo visitableObj,
+      FilterResolverMetadata metadata) throws FilterUnsupportedException {
+    DimColumnFilterInfo resolvedFilterObject = null;
+    List<String> evaluateResultListFinal;
+    try {
+      evaluateResultListFinal = metadata.getExpression().evaluate(null).getListAsString();
+    } catch (FilterIllegalMemberException e) {
+      throw new FilterUnsupportedException(e);
+    }
+    try {
+      resolvedFilterObject = FilterUtil
+          .getFilterValues(metadata.getTableIdentifier(), metadata.getColumnExpression(),
+              evaluateResultListFinal, metadata.isIncludeFilter());
+      if (!metadata.isIncludeFilter() && null != resolvedFilterObject) {
+        // Add the default surrogate key of the null member so that null values
+        // are not displayed in the report, as per Hive compatibility.
+        resolvedFilterObject.getFilterList()
+            .add(CarbonCommonConstants.MEMBER_DEFAULT_VAL_SURROGATE_KEY);
+        Collections.sort(resolvedFilterObject.getFilterList());
+      }
+    } catch (QueryExecutionException e) {
+      throw new FilterUnsupportedException(e);
+    }
+    visitableObj.setFilterValues(resolvedFilterObject);
+  }
+}
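
For an exclude filter, the visitor above appends the null member's default surrogate and re-sorts the list so that null rows are excluded as well. The mechanics in isolation, with an assumed value of 1 for MEMBER_DEFAULT_VAL_SURROGATE_KEY:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class ExcludeNullSketch {
      public static void main(String[] args) {
        int nullSurrogate = 1; // assumed value of MEMBER_DEFAULT_VAL_SURROGATE_KEY
        List<Integer> filterList = new ArrayList<Integer>(Arrays.asList(7, 3, 12));
        if (!filterList.contains(nullSurrogate)) {
          filterList.add(nullSurrogate); // exclude the null member as well
        }
        Collections.sort(filterList);    // keep the list sorted, as the visitor does
        System.out.println(filterList);  // [1, 3, 7, 12]
      }
    }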

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/FilterInfoTypeVisitorFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/FilterInfoTypeVisitorFactory.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/FilterInfoTypeVisitorFactory.java
new file mode 100644
index 0000000..986bedc
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/FilterInfoTypeVisitorFactory.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.resolver.resolverinfo.visitor;
+
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.scan.expression.ColumnExpression;
+
+public class FilterInfoTypeVisitorFactory {
+
+  /**
+   * This factory method will be used to get the visitor instance based on the
+   * column expression metadata where the filters have been applied.
+   *
+   * @param columnExpression
+   * @return the matching visitor, or null if no encoding matches
+   */
+  public static ResolvedFilterInfoVisitorIntf getResolvedFilterInfoVisitor(
+      ColumnExpression columnExpression) {
+    if (columnExpression.getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+      return new CustomTypeDictionaryVisitor();
+    } else if (!columnExpression.getDimension().hasEncoding(Encoding.DICTIONARY)) {
+      return new NoDictionaryTypeVisitor();
+    } else if (columnExpression.getDimension().hasEncoding(Encoding.DICTIONARY)) {
+      return new DictionaryColumnVisitor();
+    }
+
+    return null;
+  }
+}
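
Putting the factory together with the visitable interface shown earlier, a caller asks for the visitor matching the column's encoding and lets double dispatch do the rest. A hedged fragment (columnExpression, visitableInfo, and metadata assumed in scope, exception handling elided):

    ResolvedFilterInfoVisitorIntf visitor =
        FilterInfoTypeVisitorFactory.getResolvedFilterInfoVisitor(columnExpression);
    if (visitor != null) {
      // DIRECT_DICTIONARY wins over plain DICTIONARY because it is checked first above.
      visitableInfo.populateFilterInfoBasedOnColumnType(visitor, metadata);
    }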


[28/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/IncludeColGroupFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/IncludeColGroupFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/IncludeColGroupFilterExecuterImpl.java
new file mode 100644
index 0000000..edceb82
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/IncludeColGroupFilterExecuterImpl.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+import org.apache.carbondata.core.keygenerator.KeyGenerator;
+import org.apache.carbondata.core.util.ByteUtil;
+import org.apache.carbondata.scan.executor.infos.KeyStructureInfo;
+import org.apache.carbondata.scan.executor.util.QueryUtil;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+
+/**
+ * It checks whether a filter is required on the given block and, if required,
+ * does a linear search on the block data and sets the bitset.
+ */
+public class IncludeColGroupFilterExecuterImpl extends IncludeFilterExecuterImpl {
+
+  /**
+   * LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(IncludeColGroupFilterExecuterImpl.class.getName());
+
+  /**
+   * @param dimColResolvedFilterInfo
+   * @param segmentProperties
+   */
+  public IncludeColGroupFilterExecuterImpl(DimColumnResolvedFilterInfo dimColResolvedFilterInfo,
+      SegmentProperties segmentProperties) {
+    super(dimColResolvedFilterInfo, segmentProperties);
+  }
+
+  /**
+   * It fills the BitSet with the row indexes that match the filter keys
+   */
+  protected BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
+      int numerOfRows) {
+    BitSet bitSet = new BitSet(numerOfRows);
+
+    try {
+      KeyStructureInfo keyStructureInfo = getKeyStructureInfo();
+      byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+      for (int i = 0; i < filterValues.length; i++) {
+        byte[] filterVal = filterValues[i];
+        for (int rowId = 0; rowId < numerOfRows; rowId++) {
+          byte[] colData = new byte[keyStructureInfo.getMaskByteRanges().length];
+          dimensionColumnDataChunk.fillChunkData(colData, 0, rowId, keyStructureInfo);
+          if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterVal, colData) == 0) {
+            bitSet.set(rowId);
+          }
+        }
+      }
+
+    } catch (Exception e) {
+      LOGGER.error(e);
+    }
+
+    return bitSet;
+  }
+
+  /**
+   * It is required for extracting column data from a column group chunk
+   *
+   * @return the key structure info used to mask out this column's bytes
+   * @throws KeyGenException
+   */
+  private KeyStructureInfo getKeyStructureInfo() throws KeyGenException {
+    int colGrpId = getColumnGroupId(dimColumnEvaluatorInfo.getColumnIndex());
+    KeyGenerator keyGenerator = segmentProperties.getColumnGroupAndItsKeygenartor().get(colGrpId);
+    List<Integer> mdKeyOrdinal = new ArrayList<Integer>();
+    mdKeyOrdinal.add(getMdkeyOrdinal(dimColumnEvaluatorInfo.getColumnIndex(), colGrpId));
+    int[] maskByteRanges = QueryUtil.getMaskedByteRangeBasedOrdinal(mdKeyOrdinal, keyGenerator);
+    byte[] maxKey = QueryUtil.getMaxKeyBasedOnOrinal(mdKeyOrdinal, keyGenerator);
+    int[] maksedByte = QueryUtil.getMaskedByte(keyGenerator.getKeySizeInBytes(), maskByteRanges);
+    KeyStructureInfo restructureInfos = new KeyStructureInfo();
+    restructureInfos.setKeyGenerator(keyGenerator);
+    restructureInfos.setMaskByteRanges(maskByteRanges);
+    restructureInfos.setMaxKey(maxKey);
+    restructureInfos.setMaskedBytes(maksedByte);
+    return restructureInfos;
+  }
+
+  /**
+   * Check if scan is required on given block based on min and max value
+   */
+  public BitSet isScanRequired(byte[][] blkMaxVal, byte[][] blkMinVal) {
+    BitSet bitSet = new BitSet(1);
+    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+    int columnIndex = dimColumnEvaluatorInfo.getColumnIndex();
+    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping().get(columnIndex);
+    int[] cols = getAllColumns(columnIndex);
+    byte[] maxValue = getMinMaxData(cols, blkMaxVal[blockIndex], columnIndex);
+    byte[] minValue = getMinMaxData(cols, blkMinVal[blockIndex], columnIndex);
+    boolean isScanRequired = false;
+    for (int k = 0; k < filterValues.length; k++) {
+      // the filter value should lie within the min/max range, i.e.
+      // max >= filtervalue >= min, so (filter - max) should be non-positive
+      int maxCompare = ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], maxValue);
+      // and (filter - min) should be non-negative
+      int minCompare = ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], minValue);
+
+      // if any filter value is in range, then this block needs to be scanned
+      if (maxCompare <= 0 && minCompare >= 0) {
+        isScanRequired = true;
+        break;
+      }
+    }
+    if (isScanRequired) {
+      bitSet.set(0);
+    }
+    return bitSet;
+  }
+
+  /**
+   * It extracts the min and max data for the given column from the stored
+   * min/max value
+   *
+   * @param colGrpColumns
+   * @param minMaxData
+   * @param columnIndex
+   * @return the min/max bytes belonging to the given column
+   */
+  private byte[] getMinMaxData(int[] colGrpColumns, byte[] minMaxData, int columnIndex) {
+    int startIndex = 0;
+    int endIndex = 0;
+    if (null != colGrpColumns) {
+      for (int i = 0; i < colGrpColumns.length; i++) {
+        int colGrpId = getColumnGroupId(colGrpColumns[i]);
+        int mdKeyOrdinal = getMdkeyOrdinal(colGrpColumns[i], colGrpId);
+        int[] byteRange = getKeyGenerator(colGrpId).getKeyByteOffsets(mdKeyOrdinal);
+        int colSize = 0;
+        for (int j = byteRange[0]; j <= byteRange[1]; j++) {
+          colSize++;
+        }
+        if (colGrpColumns[i] == columnIndex) {
+          endIndex = startIndex + colSize;
+          break;
+        }
+        startIndex += colSize;
+      }
+    }
+    byte[] data = new byte[endIndex - startIndex];
+    System.arraycopy(minMaxData, startIndex, data, 0, data.length);
+    return data;
+  }
+
+  /**
+   * It returns the column group which contains the provided column ordinal
+   *
+   * @param columnIndex
+   * @return column group array
+   */
+  private int[] getAllColumns(int columnIndex) {
+    int[][] colGroups = segmentProperties.getColumnGroups();
+    for (int i = 0; i < colGroups.length; i++) {
+      if (QueryUtil.searchInArray(colGroups[i], columnIndex)) {
+        return colGroups[i];
+      }
+    }
+    return null;
+  }
+
+  private int getMdkeyOrdinal(int ordinal, int colGrpId) {
+    return segmentProperties.getColumnGroupMdKeyOrdinal(colGrpId, ordinal);
+  }
+
+  private int getColumnGroupId(int ordinal) {
+    int[][] columnGroups = segmentProperties.getColumnGroups();
+    int colGrpId = -1;
+    for (int i = 0; i < columnGroups.length; i++) {
+      if (columnGroups[i].length > 1) {
+        colGrpId++;
+        if (QueryUtil.searchInArray(columnGroups[i], ordinal)) {
+          break;
+        }
+      }
+    }
+    return colGrpId;
+  }
+
+  public KeyGenerator getKeyGenerator(int colGrpId) {
+    return segmentProperties.getColumnGroupAndItsKeygenartor().get(colGrpId);
+  }
+}
\ No newline at end of file
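
Within a column group, a single stored min/max byte array covers several columns back to back, and getMinMaxData above walks the per-column byte widths to slice out the requested column. The slicing arithmetic in isolation, with made-up widths:

    import java.util.Arrays;

    public class ColGroupSliceSketch {
      // Slice the bytes of one column out of a concatenated min/max array.
      static byte[] slice(byte[] minMaxData, int[] colWidths, int targetCol) {
        int start = 0;
        for (int i = 0; i < targetCol; i++) {
          start += colWidths[i]; // skip the earlier columns' bytes
        }
        return Arrays.copyOfRange(minMaxData, start, start + colWidths[targetCol]);
      }

      public static void main(String[] args) {
        byte[] minMax = { 1, 2, 3, 4, 5, 6 }; // three columns of width 2 each
        int[] widths = { 2, 2, 2 };
        System.out.println(Arrays.toString(slice(minMax, widths, 1))); // [3, 4]
      }
    }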

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/IncludeFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/IncludeFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/IncludeFilterExecuterImpl.java
new file mode 100644
index 0000000..8e645b3
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/IncludeFilterExecuterImpl.java
@@ -0,0 +1,206 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import java.util.BitSet;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.impl.VariableLengthDimensionDataChunk;
+import org.apache.carbondata.core.util.ByteUtil;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.scan.filter.FilterUtil;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+public class IncludeFilterExecuterImpl implements FilterExecuter {
+
+  protected DimColumnResolvedFilterInfo dimColumnEvaluatorInfo;
+  protected DimColumnExecuterFilterInfo dimColumnExecuterInfo;
+  protected SegmentProperties segmentProperties;
+
+  public IncludeFilterExecuterImpl(DimColumnResolvedFilterInfo dimColumnEvaluatorInfo,
+      SegmentProperties segmentProperties) {
+    this.dimColumnEvaluatorInfo = dimColumnEvaluatorInfo;
+    this.segmentProperties = segmentProperties;
+    dimColumnExecuterInfo = new DimColumnExecuterFilterInfo();
+    FilterUtil.prepareKeysFromSurrogates(dimColumnEvaluatorInfo.getFilterValues(),
+        segmentProperties.getDimensionKeyGenerator(), dimColumnEvaluatorInfo.getDimension(),
+        dimColumnExecuterInfo);
+
+  }
+
+  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder) {
+    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
+        .get(dimColumnEvaluatorInfo.getColumnIndex());
+    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
+      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
+          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
+    }
+    return getFilteredIndexes(blockChunkHolder.getDimensionDataChunk()[blockIndex],
+        blockChunkHolder.getDataBlock().nodeSize());
+  }
+
+  protected BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
+      int numerOfRows) {
+    if (dimensionColumnDataChunk.getAttributes().isNoDictionary()
+        && dimensionColumnDataChunk instanceof VariableLengthDimensionDataChunk) {
+      return setDirectKeyFilterIndexToBitSet(
+          (VariableLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows);
+    } else if (null != dimensionColumnDataChunk.getAttributes().getInvertedIndexes()
+        && dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
+      return setFilterdIndexToBitSetWithColumnIndex(
+          (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows);
+    }
+
+    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows);
+  }
+
+  private BitSet setDirectKeyFilterIndexToBitSet(
+      VariableLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    List<byte[]> listOfColumnarKeyBlockDataForNoDictionaryVals =
+        dimensionColumnDataChunk.getCompleteDataChunk();
+    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+    int[] columnIndexArray = dimensionColumnDataChunk.getAttributes().getInvertedIndexes();
+    int[] columnReverseIndexArray =
+        dimensionColumnDataChunk.getAttributes().getInvertedIndexesReverse();
+    for (int i = 0; i < filterValues.length; i++) {
+      byte[] filterVal = filterValues[i];
+      if (null != listOfColumnarKeyBlockDataForNoDictionaryVals) {
+        if (null != columnIndexArray) {
+          for (int index : columnIndexArray) {
+            byte[] noDictionaryVal =
+                listOfColumnarKeyBlockDataForNoDictionaryVals.get(columnReverseIndexArray[index]);
+            if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterVal, noDictionaryVal) == 0) {
+              bitSet.set(index);
+            }
+          }
+        } else if (null != columnReverseIndexArray) {
+          for (int index : columnReverseIndexArray) {
+            byte[] noDictionaryVal =
+                listOfColumnarKeyBlockDataForNoDictionaryVals.get(columnReverseIndexArray[index]);
+            if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterVal, noDictionaryVal) == 0) {
+              bitSet.set(index);
+            }
+          }
+        } else {
+          for (int index = 0;
+               index < listOfColumnarKeyBlockDataForNoDictionaryVals.size(); index++) {
+            if (ByteUtil.UnsafeComparer.INSTANCE
+                .compareTo(filterVal, listOfColumnarKeyBlockDataForNoDictionaryVals.get(index))
+                == 0) {
+              bitSet.set(index);
+            }
+          }
+        }
+      }
+    }
+    return bitSet;
+
+  }
+
+  private BitSet setFilterdIndexToBitSetWithColumnIndex(
+      FixedLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    int[] columnIndex = dimensionColumnDataChunk.getAttributes().getInvertedIndexes();
+    int start = 0;
+    int last = 0;
+    int startIndex = 0;
+    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+    for (int i = 0; i < filterValues.length; i++) {
+      start = CarbonUtil
+          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+              filterValues[i], false);
+      if (start < 0) {
+        continue;
+      }
+      bitSet.set(columnIndex[start]);
+      last = start;
+      for (int j = start + 1; j < numerOfRows; j++) {
+        if (ByteUtil.UnsafeComparer.INSTANCE
+            .compareTo(dimensionColumnDataChunk.getCompleteDataChunk(), j * filterValues[i].length,
+                filterValues[i].length, filterValues[i], 0, filterValues[i].length) == 0) {
+          bitSet.set(columnIndex[j]);
+          last++;
+        } else {
+          break;
+        }
+      }
+      startIndex = last;
+      if (startIndex >= numerOfRows) {
+        break;
+      }
+    }
+    return bitSet;
+  }
+
+  private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
+      int numerOfRows) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
+      FixedLengthDimensionDataChunk fixedDimensionChunk =
+          (FixedLengthDimensionDataChunk) dimensionColumnDataChunk;
+      byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+      for (int k = 0; k < filterValues.length; k++) {
+        for (int j = 0; j < numerOfRows; j++) {
+          if (ByteUtil.UnsafeComparer.INSTANCE
+              .compareTo(fixedDimensionChunk.getCompleteDataChunk(), j * filterValues[k].length,
+                  filterValues[k].length, filterValues[k], 0, filterValues[k].length) == 0) {
+            bitSet.set(j);
+          }
+        }
+      }
+    }
+    return bitSet;
+  }
+
+  public BitSet isScanRequired(byte[][] blkMaxVal, byte[][] blkMinVal) {
+    BitSet bitSet = new BitSet(1);
+    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+    int columnIndex = dimColumnEvaluatorInfo.getColumnIndex();
+    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping().get(columnIndex);
+
+    boolean isScanRequired = false;
+    for (int k = 0; k < filterValues.length; k++) {
+      // the filter value should lie within the min/max range, i.e.
+      // max >= filtervalue >= min, so (filter - max) should be non-positive
+      int maxCompare =
+          ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blkMaxVal[blockIndex]);
+      // and (filter - min) should be non-negative
+      int minCompare =
+          ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blkMinVal[blockIndex]);
+
+      // if any filter value is in range, then this block needs to be scanned
+      if (maxCompare <= 0 && minCompare >= 0) {
+        isScanRequired = true;
+        break;
+      }
+    }
+    if (isScanRequired) {
+      bitSet.set(0);
+    }
+    return bitSet;
+  }
+
+}
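
The pruning test above only asks whether any filter key lies inside the block's [min, max] value range. A self-contained sketch with plain byte arrays; the compare method approximates the unsigned lexicographic semantics assumed of ByteUtil.UnsafeComparer:

    public class MinMaxPruneDemo {
      // Unsigned lexicographic compare over raw bytes.
      static int compare(byte[] a, byte[] b) {
        int len = Math.min(a.length, b.length);
        for (int i = 0; i < len; i++) {
          int d = (a[i] & 0xFF) - (b[i] & 0xFF);
          if (d != 0) {
            return d;
          }
        }
        return a.length - b.length;
      }

      static boolean isScanRequired(byte[][] filterValues, byte[] blkMin, byte[] blkMax) {
        for (byte[] filter : filterValues) {
          // filter <= max and filter >= min means the key can exist in this block
          if (compare(filter, blkMax) <= 0 && compare(filter, blkMin) >= 0) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) {
        byte[] min = { 10 }, max = { 50 };
        byte[][] outside = { { 5 }, { 60 } };
        System.out.println(isScanRequired(outside, min, max)); // false: both keys outside [10, 50]
        byte[][] inRange = { { 30 } };
        System.out.println(isScanRequired(inRange, min, max)); // true: 30 lies in [10, 50]
      }
    }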

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/OrFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/OrFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/OrFilterExecuterImpl.java
new file mode 100644
index 0000000..8fd5431
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/OrFilterExecuterImpl.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import java.util.BitSet;
+
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+public class OrFilterExecuterImpl implements FilterExecuter {
+
+  private FilterExecuter leftExecuter;
+  private FilterExecuter rightExecuter;
+
+  public OrFilterExecuterImpl(FilterExecuter leftExecuter, FilterExecuter rightExecuter) {
+    this.leftExecuter = leftExecuter;
+    this.rightExecuter = rightExecuter;
+  }
+
+  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
+      throws FilterUnsupportedException {
+    BitSet leftFilters = leftExecuter.applyFilter(blockChunkHolder);
+    BitSet rightFilters = rightExecuter.applyFilter(blockChunkHolder);
+    leftFilters.or(rightFilters);
+
+    return leftFilters;
+  }
+
+  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
+    BitSet leftFilters = leftExecuter.isScanRequired(blockMaxValue, blockMinValue);
+    BitSet rightFilters = rightExecuter.isScanRequired(blockMaxValue, blockMinValue);
+    leftFilters.or(rightFilters);
+    return leftFilters;
+  }
+
+}
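
The OR executer's whole job is the union of the two child bitsets, both for row filtering and for block pruning (a block survives if either side might match). A minimal JDK illustration:

    import java.util.BitSet;

    public class OrBitSetSketch {
      public static void main(String[] args) {
        BitSet left = new BitSet(8);
        left.set(2);  // row 2 matches the left child filter
        BitSet right = new BitSet(8);
        right.set(4); // row 4 matches the right child filter
        left.or(right);
        System.out.println(left); // {2, 4}: the union, mirroring leftFilters.or(rightFilters)
      }
    }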

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/RestructureFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/RestructureFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RestructureFilterExecuterImpl.java
new file mode 100644
index 0000000..84d7898
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RestructureFilterExecuterImpl.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import java.util.BitSet;
+
+import org.apache.carbondata.core.keygenerator.KeyGenerator;
+import org.apache.carbondata.scan.filter.FilterUtil;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+
+public class RestructureFilterExecuterImpl implements FilterExecuter {
+
+  DimColumnExecuterFilterInfo dimColumnExecuterInfo;
+
+  public RestructureFilterExecuterImpl(DimColumnResolvedFilterInfo dimColumnResolvedFilterInfo,
+      KeyGenerator blockKeyGenerator) {
+    dimColumnExecuterInfo = new DimColumnExecuterFilterInfo();
+    FilterUtil
+        .prepareKeysFromSurrogates(dimColumnResolvedFilterInfo.getFilterValues(), blockKeyGenerator,
+            dimColumnResolvedFilterInfo.getDimension(), dimColumnExecuterInfo);
+  }
+
+  @Override public BitSet applyFilter(BlocksChunkHolder blocksChunkHolder) {
+    BitSet bitSet = new BitSet(blocksChunkHolder.getDataBlock().nodeSize());
+    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+    if (null != filterValues && filterValues.length > 0) {
+      bitSet.set(0, blocksChunkHolder.getDataBlock().nodeSize());
+    }
+    return bitSet;
+  }
+
+  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
+    BitSet bitSet = new BitSet(1);
+    bitSet.set(0);
+    return bitSet;
+  }
+}
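
RestructureFilterExecuterImpl handles a filter on a column that did not exist when the block was written: the block cannot be pruned row by row, so applyFilter either selects every row (when filter keys resolved) or none. A hypothetical standalone sketch of that all-or-nothing behavior, not the CarbonData API:

    import java.util.BitSet;

    public class RestructureDemo {
      // mirrors the all-or-nothing logic of applyFilter above
      static BitSet applyFilter(byte[][] filterKeys, int rowCount) {
        BitSet bitSet = new BitSet(rowCount);
        if (filterKeys != null && filterKeys.length > 0) {
          bitSet.set(0, rowCount);  // select all rows; exact matching happens later
        }
        return bitSet;
      }

      public static void main(String[] args) {
        System.out.println(applyFilter(new byte[][] { { 1 } }, 4));  // {0, 1, 2, 3}
        System.out.println(applyFilter(new byte[0][], 4));           // {}
      }
    }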

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelFilterExecuterImpl.java
new file mode 100644
index 0000000..00f80bb
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelFilterExecuterImpl.java
@@ -0,0 +1,402 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.datastore.chunk.impl.VariableLengthDimensionDataChunk;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.DataTypeUtil;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.executor.infos.KeyStructureInfo;
+import org.apache.carbondata.scan.executor.util.QueryUtil;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.FilterUtil;
+import org.apache.carbondata.scan.filter.GenericQueryType;
+import org.apache.carbondata.scan.filter.intf.RowImpl;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+public class RowLevelFilterExecuterImpl implements FilterExecuter {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(RowLevelFilterExecuterImpl.class.getName());
+  protected List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList;
+  protected List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList;
+  protected Expression exp;
+  protected AbsoluteTableIdentifier tableIdentifier;
+  protected SegmentProperties segmentProperties;
+  /**
+   * holds the index at which the given dimension is stored in the file
+   */
+  private int[] blocksIndex;
+
+  private Map<Integer, GenericQueryType> complexDimensionInfoMap;
+
+  public RowLevelFilterExecuterImpl(List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
+      List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
+      AbsoluteTableIdentifier tableIdentifier, SegmentProperties segmentProperties,
+      Map<Integer, GenericQueryType> complexDimensionInfoMap) {
+    this.dimColEvaluatorInfoList = dimColEvaluatorInfoList;
+    this.segmentProperties = segmentProperties;
+    this.blocksIndex = new int[dimColEvaluatorInfoList.size()];
+    for (int i = 0; i < dimColEvaluatorInfoList.size(); i++) {
+      this.blocksIndex[i] = segmentProperties.getDimensionOrdinalToBlockMapping()
+          .get(dimColEvaluatorInfoList.get(i).getColumnIndex());
+    }
+    if (null == msrColEvalutorInfoList) {
+      this.msrColEvalutorInfoList = new ArrayList<MeasureColumnResolvedFilterInfo>(20);
+    } else {
+      this.msrColEvalutorInfoList = msrColEvalutorInfoList;
+    }
+    this.exp = exp;
+    this.tableIdentifier = tableIdentifier;
+    this.complexDimensionInfoMap = complexDimensionInfoMap;
+  }
+
+  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
+      throws FilterUnsupportedException {
+    for (int i = 0; i < dimColEvaluatorInfoList.size(); i++) {
+      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo = dimColEvaluatorInfoList.get(i);
+      if (dimColumnEvaluatorInfo.getDimension().getDataType() != DataType.ARRAY
+          && dimColumnEvaluatorInfo.getDimension().getDataType() != DataType.STRUCT) {
+        if (null == blockChunkHolder.getDimensionDataChunk()[blocksIndex[i]]) {
+          blockChunkHolder.getDimensionDataChunk()[blocksIndex[i]] = blockChunkHolder.getDataBlock()
+              .getDimensionChunk(blockChunkHolder.getFileReader(), blocksIndex[i]);
+        }
+      } else {
+        GenericQueryType complexType = complexDimensionInfoMap.get(blocksIndex[i]);
+        complexType.fillRequiredBlockData(blockChunkHolder);
+      }
+    }
+
+    // CHECKSTYLE:OFF Approval No:Approval-V1R2C10_001
+    if (null != msrColEvalutorInfoList) {
+      for (MeasureColumnResolvedFilterInfo msrColumnEvalutorInfo : msrColEvalutorInfoList) {
+        if (msrColumnEvalutorInfo.isMeasureExistsInCurrentSlice() && null == blockChunkHolder
+            .getMeasureDataChunk()[msrColumnEvalutorInfo.getColumnIndex()]) {
+          blockChunkHolder.getMeasureDataChunk()[msrColumnEvalutorInfo.getColumnIndex()] =
+              blockChunkHolder.getDataBlock().getMeasureChunk(blockChunkHolder.getFileReader(),
+                  msrColumnEvalutorInfo.getColumnIndex());
+        }
+      }
+    }
+    // CHECKSTYLE:ON
+
+    int numberOfRows = blockChunkHolder.getDataBlock().nodeSize();
+    BitSet set = new BitSet(numberOfRows);
+    RowIntf row = new RowImpl();
+    boolean invalidRowsPresent = false;
+    for (int index = 0; index < numberOfRows; index++) {
+      try {
+        createRow(blockChunkHolder, row, index);
+      } catch (QueryExecutionException e) {
+        FilterUtil.logError(e, invalidRowsPresent);
+      }
+      Boolean rslt = false;
+      try {
+        rslt = exp.evaluate(row).getBoolean();
+      }
+      // Any invalid member encountered during evaluation shall be ignored. Since
+      // the evaluation happens for every row, the error is logged only once in
+      // order to avoid flooding the log with too much information.
+      catch (FilterIllegalMemberException e) {
+        FilterUtil.logError(e, invalidRowsPresent);
+      }
+      if (null != rslt && rslt) {
+        set.set(index);
+      }
+    }
+    return set;
+  }
+
+  /**
+   * Method will read the members of a particular dimension block and create
+   * a row instance for further processing by the filters
+   *
+   * @param blockChunkHolder
+   * @param row
+   * @param index
+   * @throws QueryExecutionException
+   */
+  private void createRow(BlocksChunkHolder blockChunkHolder, RowIntf row, int index)
+      throws QueryExecutionException {
+    Object[] record = new Object[dimColEvaluatorInfoList.size() + msrColEvalutorInfoList.size()];
+    String memberString = null;
+    for (int i = 0; i < dimColEvaluatorInfoList.size(); i++) {
+      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo = dimColEvaluatorInfoList.get(i);
+      if (dimColumnEvaluatorInfo.getDimension().getDataType() != DataType.ARRAY
+          && dimColumnEvaluatorInfo.getDimension().getDataType() != DataType.STRUCT) {
+        if (!dimColumnEvaluatorInfo.isDimensionExistsInCurrentSilce()) {
+          record[dimColumnEvaluatorInfo.getRowIndex()] = dimColumnEvaluatorInfo.getDefaultValue();
+        }
+        if (!dimColumnEvaluatorInfo.getDimension().hasEncoding(Encoding.DICTIONARY)
+            && blockChunkHolder
+            .getDimensionDataChunk()[blocksIndex[i]] instanceof VariableLengthDimensionDataChunk) {
+
+          VariableLengthDimensionDataChunk dimensionColumnDataChunk =
+              (VariableLengthDimensionDataChunk) blockChunkHolder
+                  .getDimensionDataChunk()[blocksIndex[i]];
+          if (null != dimensionColumnDataChunk.getCompleteDataChunk()) {
+            memberString =
+                readMemberBasedOnNoDictionaryVal(dimColumnEvaluatorInfo, dimensionColumnDataChunk,
+                    index);
+            if (null != memberString) {
+              if (memberString.equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
+                memberString = null;
+              }
+            }
+            record[dimColumnEvaluatorInfo.getRowIndex()] = DataTypeUtil
+                .getDataBasedOnDataType(memberString,
+                    dimColumnEvaluatorInfo.getDimension().getDataType());
+          } else {
+            continue;
+          }
+        } else {
+          int dictionaryValue =
+              readSurrogatesFromColumnBlock(blockChunkHolder, index, dimColumnEvaluatorInfo,
+                  blocksIndex[i]);
+          Dictionary forwardDictionary = null;
+          if (dimColumnEvaluatorInfo.getDimension().hasEncoding(Encoding.DICTIONARY)
+              && !dimColumnEvaluatorInfo.getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+            memberString =
+                getFilterActualValueFromDictionaryValue(dimColumnEvaluatorInfo, dictionaryValue,
+                    forwardDictionary);
+            record[dimColumnEvaluatorInfo.getRowIndex()] = DataTypeUtil
+                .getDataBasedOnDataType(memberString,
+                    dimColumnEvaluatorInfo.getDimension().getDataType());
+          } else if (dimColumnEvaluatorInfo.getDimension()
+              .hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+
+            Object member = getFilterActualValueFromDirectDictionaryValue(dimColumnEvaluatorInfo,
+                dictionaryValue);
+            record[dimColumnEvaluatorInfo.getRowIndex()] = member;
+          }
+        }
+      } else {
+        try {
+          GenericQueryType complexType = complexDimensionInfoMap.get(blocksIndex[i]);
+          ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
+          DataOutputStream dataOutputStream = new DataOutputStream(byteStream);
+          complexType
+              .parseBlocksAndReturnComplexColumnByteArray(blockChunkHolder.getDimensionDataChunk(),
+                  index, dataOutputStream);
+          record[dimColumnEvaluatorInfo.getRowIndex()] = complexType
+              .getDataBasedOnDataTypeFromSurrogates(ByteBuffer.wrap(byteStream.toByteArray()));
+          byteStream.close();
+        } catch (IOException e) {
+          LOGGER.info(e.getMessage());
+        }
+      }
+    }
+
+    DataType msrType;
+
+    for (MeasureColumnResolvedFilterInfo msrColumnEvalutorInfo : msrColEvalutorInfoList) {
+      switch (msrColumnEvalutorInfo.getType()) {
+        case INT:
+        case LONG:
+          msrType = DataType.LONG;
+          break;
+        case DECIMAL:
+          msrType = DataType.DECIMAL;
+          break;
+        default:
+          msrType = DataType.DOUBLE;
+      }
+      // if the measure doesn't exist then set the default value.
+      if (!msrColumnEvalutorInfo.isMeasureExistsInCurrentSlice()) {
+        record[msrColumnEvalutorInfo.getRowIndex()] = msrColumnEvalutorInfo.getDefaultValue();
+      } else {
+        Object msrValue;
+        switch (msrType) {
+          case INT:
+          case LONG:
+            msrValue =
+                blockChunkHolder.getMeasureDataChunk()[msrColumnEvalutorInfo.getColumnIndex()]
+                    .getMeasureDataHolder().getReadableLongValueByIndex(index);
+            break;
+          case DECIMAL:
+            msrValue =
+                blockChunkHolder.getMeasureDataChunk()[msrColumnEvalutorInfo.getColumnIndex()]
+                    .getMeasureDataHolder().getReadableBigDecimalValueByIndex(index);
+            break;
+          default:
+            msrValue =
+                blockChunkHolder.getMeasureDataChunk()[msrColumnEvalutorInfo.getColumnIndex()]
+                    .getMeasureDataHolder().getReadableDoubleValueByIndex(index);
+        }
+        record[msrColumnEvalutorInfo.getRowIndex()] =
+            blockChunkHolder.getMeasureDataChunk()[msrColumnEvalutorInfo.getColumnIndex()]
+                .getNullValueIndexHolder().getBitSet().get(index) ? null : msrValue;
+
+      }
+    }
+    row.setValues(record);
+  }
+
+  /**
+   * Method will read the actual value from the direct dictionary generator
+   * by passing the direct dictionary (surrogate) value.
+   *
+   * @param dimColumnEvaluatorInfo
+   * @param dictionaryValue
+   * @return
+   */
+  private Object getFilterActualValueFromDirectDictionaryValue(
+      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo, int dictionaryValue) {
+    Object memberString = null;
+    DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
+        .getDirectDictionaryGenerator(dimColumnEvaluatorInfo.getDimension().getDataType());
+    if (null != directDictionaryGenerator) {
+      memberString = directDictionaryGenerator.getValueFromSurrogate(dictionaryValue);
+    }
+    return memberString;
+  }
+
+  /**
+   * Read the actual filter member for the given dictionary value from
+   * the forward dictionary cache, which holds a column-wise cache
+   *
+   * @param dimColumnEvaluatorInfo
+   * @param dictionaryValue
+   * @param forwardDictionary
+   * @return
+   * @throws QueryExecutionException
+   */
+  private String getFilterActualValueFromDictionaryValue(
+      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo, int dictionaryValue,
+      Dictionary forwardDictionary) throws QueryExecutionException {
+    String memberString;
+    try {
+      forwardDictionary = FilterUtil
+          .getForwardDictionaryCache(tableIdentifier, dimColumnEvaluatorInfo.getDimension());
+    } catch (QueryExecutionException e) {
+      throw new QueryExecutionException(e);
+    }
+
+    memberString = forwardDictionary.getDictionaryValueForKey(dictionaryValue);
+    if (null != memberString) {
+      if (memberString.equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
+        memberString = null;
+      }
+    }
+    return memberString;
+  }
+
+  /**
+   * Read the filter member dictionary data from the block corresponding to
+   * the applied filter column
+   *
+   * @param blockChunkHolder
+   * @param index
+   * @param dimColumnEvaluatorInfo
+   * @return
+   */
+  private int readSurrogatesFromColumnBlock(BlocksChunkHolder blockChunkHolder, int index,
+      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo, int blockIndex) {
+    if (dimColumnEvaluatorInfo.getDimension().isColumnar()) {
+      byte[] rawData = blockChunkHolder.getDimensionDataChunk()[blockIndex].getChunkData(index);
+      ByteBuffer byteBuffer = ByteBuffer.allocate(CarbonCommonConstants.INT_SIZE_IN_BYTE);
+      int dictionaryValue = CarbonUtil.getSurrogateKey(rawData, byteBuffer);
+      return dictionaryValue;
+    } else {
+      return readSurrogatesFromColumnGroupBlock(blockChunkHolder, index, dimColumnEvaluatorInfo,
+          blockIndex);
+    }
+
+  }
+
+  /**
+   * @param blockChunkHolder
+   * @param index
+   * @param dimColumnEvaluatorInfo
+   * @return the surrogate read for the given row of the given column group dimension
+   */
+  private int readSurrogatesFromColumnGroupBlock(BlocksChunkHolder blockChunkHolder, int index,
+      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo, int blockIndex) {
+    try {
+      KeyStructureInfo keyStructureInfo =
+          QueryUtil.getKeyStructureInfo(segmentProperties, dimColumnEvaluatorInfo);
+      byte[] colData = blockChunkHolder.getDimensionDataChunk()[blockIndex].getChunkData(index);
+      long[] result = keyStructureInfo.getKeyGenerator().getKeyArray(colData);
+      int colGroupId =
+          QueryUtil.getColumnGroupId(segmentProperties, dimColumnEvaluatorInfo.getColumnIndex());
+      int dictionaryValue = (int) result[segmentProperties
+          .getColumnGroupMdKeyOrdinal(colGroupId, dimColumnEvaluatorInfo.getColumnIndex())];
+      return dictionaryValue;
+    } catch (KeyGenException e) {
+      LOGGER.error(e);
+    }
+    return 0;
+  }
+
+  /**
+   * Reads the blocks for no-dictionary data. In the no-dictionary case the
+   * filter data is read directly, so there is no need to scan the dictionary
+   * or look up a dictionary value.
+   *
+   * @param dimColumnEvaluatorInfo
+   * @param dimensionColumnDataChunk
+   * @param index
+   * @return
+   */
+  private String readMemberBasedOnNoDictionaryVal(
+      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo,
+      VariableLengthDimensionDataChunk dimensionColumnDataChunk, int index) {
+    byte[] noDictionaryVals;
+    if (null != dimensionColumnDataChunk.getAttributes().getInvertedIndexesReverse()) {
+      // Getting the data for direct surrogates.
+      noDictionaryVals = dimensionColumnDataChunk.getCompleteDataChunk()
+          .get(dimensionColumnDataChunk.getAttributes().getInvertedIndexesReverse()[index]);
+    } else {
+      noDictionaryVals = dimensionColumnDataChunk.getCompleteDataChunk().get(index);
+    }
+    return new String(noDictionaryVals, Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
+  }
+
+  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
+    BitSet bitSet = new BitSet(1);
+    bitSet.set(0);
+    return bitSet;
+  }
+}
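
RowLevelFilterExecuterImpl falls back to materializing one row at a time and evaluating the expression tree on it, collecting matching row indexes in a BitSet. A hypothetical standalone sketch of that scan pattern, with Predicate<Object[]> standing in for the resolved Expression:

    import java.util.BitSet;
    import java.util.function.Predicate;

    public class RowLevelScanDemo {
      static BitSet scan(Object[][] rows, Predicate<Object[]> expression) {
        BitSet set = new BitSet(rows.length);
        for (int index = 0; index < rows.length; index++) {
          if (expression.test(rows[index])) {  // evaluate the filter on this row
            set.set(index);
          }
        }
        return set;
      }

      public static void main(String[] args) {
        Object[][] rows = { { "a", 1L }, { "b", 7L }, { "c", 3L } };
        // the equivalent of a row-level "measure > 2" filter
        System.out.println(scan(rows, r -> (Long) r[1] > 2L));  // {1, 2}
      }
    }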

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
new file mode 100644
index 0000000..8a215dc
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import java.util.BitSet;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.util.ByteUtil;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+public class RowLevelRangeGrtThanFiterExecuterImpl extends RowLevelFilterExecuterImpl {
+  private byte[][] filterRangeValues;
+
+  public RowLevelRangeGrtThanFiterExecuterImpl(
+      List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
+      List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
+      AbsoluteTableIdentifier tableIdentifier, byte[][] filterRangeValues,
+      SegmentProperties segmentProperties) {
+    super(dimColEvaluatorInfoList, msrColEvalutorInfoList, exp, tableIdentifier, segmentProperties,
+        null);
+    this.filterRangeValues = filterRangeValues;
+  }
+
+  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
+    BitSet bitSet = new BitSet(1);
+    byte[][] filterValues = this.filterRangeValues;
+    int columnIndex = this.dimColEvaluatorInfoList.get(0).getColumnIndex();
+    boolean isScanRequired = false;
+    for (int k = 0; k < filterValues.length; k++) {
+      // the filter value should be in the range of the block's min and max, i.e.
+      // max > filterValue > min,
+      // so (filter - max) should be negative
+      int maxCompare =
+          ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blockMaxValue[columnIndex]);
+      // if any filter value is in range then this block needs to be
+      // scanned, i.e. the filter value is strictly less than the block max.
+      if (maxCompare < 0) {
+        isScanRequired = true;
+        break;
+      }
+    }
+    if (isScanRequired) {
+      bitSet.set(0);
+    }
+    return bitSet;
+
+  }
+
+  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
+      throws FilterUnsupportedException {
+    if (!dimColEvaluatorInfoList.get(0).getDimension().hasEncoding(Encoding.DICTIONARY)) {
+      return super.applyFilter(blockChunkHolder);
+    }
+    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
+        .get(dimColEvaluatorInfoList.get(0).getColumnIndex());
+    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
+      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
+          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
+    }
+    return getFilteredIndexes(blockChunkHolder.getDimensionDataChunk()[blockIndex],
+        blockChunkHolder.getDataBlock().nodeSize());
+  }
+
+  private BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
+      int numerOfRows) {
+    if (null != dimensionColumnDataChunk.getAttributes().getInvertedIndexes()
+        && dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
+      return setFilterdIndexToBitSetWithColumnIndex(
+          (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows);
+    }
+    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows);
+  }
+
+  /**
+   * Method will scan the block and find the range start index from which all members
+   * will be considered for applying range filters. This method will be called if the
+   * column is not sorted by default, so a column index mapping will be present for
+   * accessing the members from the block.
+   *
+   * @param dimensionColumnDataChunk
+   * @param numerOfRows
+   * @return BitSet.
+   */
+  private BitSet setFilterdIndexToBitSetWithColumnIndex(
+      FixedLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    int[] columnIndex = dimensionColumnDataChunk.getAttributes().getInvertedIndexes();
+    int start = 0;
+    int last = 0;
+    int startIndex = 0;
+    byte[][] filterValues = this.filterRangeValues;
+    for (int i = 0; i < filterValues.length; i++) {
+      start = CarbonUtil
+          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+              filterValues[i], true);
+      if (start >= 0) {
+        start = CarbonUtil.nextGreaterValueToTarget(start,
+            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, filterValues[i], numerOfRows);
+      }
+      // Logic will handle the case where the range filter member is not present in the
+      // block; in this case the binary search will return the index from which the bits
+      // will be set in order to apply the filter. This is a greater-than filter, so the
+      // range will be taken from the next element that is greater than the filter member.
+      if (start < 0) {
+        start = -(start + 1);
+        if (start == numerOfRows) {
+          start = start - 1;
+        }
+        // Method will compare the tentative index value after binary search; this tentative
+        // index needs to be compared with the filter member, and if it is > the filter then
+        // from that index the bitset will be considered for the filtering process.
+        if (ByteUtil
+            .compare(filterValues[i], dimensionColumnDataChunk.getChunkData(columnIndex[start]))
+            > 0) {
+          start = start + 1;
+        }
+      }
+
+      last = start;
+      for (int j = start; j < numerOfRows; j++) {
+        bitSet.set(columnIndex[j]);
+        last++;
+      }
+      startIndex = last;
+      if (startIndex >= numerOfRows) {
+        break;
+      }
+    }
+
+    return bitSet;
+  }
+
+  /**
+   * Method will scan the block and find the range start index from which all
+   * members will be considered for applying range filters. This method will
+   * be called if the column is sorted by default, so no column index
+   * mapping is needed for accessing the members from the block.
+   *
+   * @param dimensionColumnDataChunk
+   * @param numerOfRows
+   * @return BitSet.
+   */
+  private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
+      int numerOfRows) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
+      int start = 0;
+      int last = 0;
+      int startIndex = 0;
+      byte[][] filterValues = this.filterRangeValues;
+      for (int k = 0; k < filterValues.length; k++) {
+        start = CarbonUtil.getFirstIndexUsingBinarySearch(
+            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+            filterValues[k], true);
+        start = CarbonUtil.nextGreaterValueToTarget(start,
+            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, filterValues[k], numerOfRows);
+        if (start < 0) {
+          start = -(start + 1);
+          if (start == numerOfRows) {
+            start = start - 1;
+          }
+          // Method will compare the tentative index value after binary search; this tentative
+          // index needs to be compared with the filter member, and if it is > the filter then
+          // from that index the bitset will be considered for the filtering process.
+          if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start)) > 0) {
+            start = start + 1;
+          }
+        }
+        last = start;
+        for (int j = start; j < numerOfRows; j++) {
+          bitSet.set(j);
+          last++;
+        }
+        startIndex = last;
+        if (startIndex >= numerOfRows) {
+          break;
+        }
+      }
+    }
+    return bitSet;
+  }
+
+}
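
The binary-search handling above relies on the usual negative insertion-point encoding: when the filter member is absent from the block, -(start + 1) recovers the index of the first element greater than the probe. A standalone sketch with java.util.Arrays.binarySearch, which uses the same encoding:

    import java.util.Arrays;

    public class InsertionPointDemo {
      public static void main(String[] args) {
        int[] sorted = { 10, 20, 30, 40 };
        int start = Arrays.binarySearch(sorted, 25);  // 25 is absent
        if (start < 0) {
          start = -(start + 1);  // 2, the slot holding 30
        }
        // for a "greater than 25" range filter, rows from index 2 onward qualify
        System.out.println("range starts at index " + start);
      }
    }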

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
new file mode 100644
index 0000000..808cb51
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import java.util.BitSet;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.util.ByteUtil;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+public class RowLevelRangeGrtrThanEquaToFilterExecuterImpl extends RowLevelFilterExecuterImpl {
+
+  protected byte[][] filterRangeValues;
+
+  public RowLevelRangeGrtrThanEquaToFilterExecuterImpl(
+      List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
+      List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
+      AbsoluteTableIdentifier tableIdentifier, byte[][] filterRangeValues,
+      SegmentProperties segmentProperties) {
+    super(dimColEvaluatorInfoList, msrColEvalutorInfoList, exp, tableIdentifier, segmentProperties,
+        null);
+    this.filterRangeValues = filterRangeValues;
+  }
+
+  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
+    BitSet bitSet = new BitSet(1);
+    byte[][] filterValues = this.filterRangeValues;
+    int columnIndex = this.dimColEvaluatorInfoList.get(0).getColumnIndex();
+    boolean isScanRequired = false;
+    for (int k = 0; k < filterValues.length; k++) {
+      // the filter value should be in the range of the block's min and max, i.e.
+      // max > filterValue > min,
+      // so (filter - max) should be negative
+      int maxCompare =
+          ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blockMaxValue[columnIndex]);
+      // if any filter value is in range then this block needs to be
+      // scanned, i.e. the filter value is less than or equal to the block max.
+      if (maxCompare <= 0) {
+        isScanRequired = true;
+        break;
+      }
+    }
+    if (isScanRequired) {
+      bitSet.set(0);
+    }
+    return bitSet;
+
+  }
+
+  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
+      throws FilterUnsupportedException {
+    if (!dimColEvaluatorInfoList.get(0).getDimension().hasEncoding(Encoding.DICTIONARY)) {
+      return super.applyFilter(blockChunkHolder);
+    }
+    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
+        .get(dimColEvaluatorInfoList.get(0).getColumnIndex());
+    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
+      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
+          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
+    }
+    return getFilteredIndexes(blockChunkHolder.getDimensionDataChunk()[blockIndex],
+        blockChunkHolder.getDataBlock().nodeSize());
+  }
+
+  private BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
+      int numerOfRows) {
+    if (null != dimensionColumnDataChunk.getAttributes().getInvertedIndexes()
+        && dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
+      return setFilterdIndexToBitSetWithColumnIndex(
+          (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows);
+    }
+    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows);
+  }
+
+  /**
+   * Method will scan the block and find the range start index from which all members
+   * will be considered for applying range filters. This method will be called if the
+   * column is not sorted by default, so a column index mapping will be present for
+   * accessing the members from the block.
+   *
+   * @param dimensionColumnDataChunk
+   * @param numerOfRows
+   * @return BitSet.
+   */
+  private BitSet setFilterdIndexToBitSetWithColumnIndex(
+      FixedLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    int[] columnIndex = dimensionColumnDataChunk.getAttributes().getInvertedIndexes();
+    int start = 0;
+    int last = 0;
+    int startIndex = 0;
+    byte[][] filterValues = this.filterRangeValues;
+    for (int i = 0; i < filterValues.length; i++) {
+      start = CarbonUtil
+          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+              filterValues[i], false);
+      if (start < 0) {
+        start = -(start + 1);
+        if (start == numerOfRows) {
+          start = start - 1;
+        }
+        // Method will compare the tentative index value after binary search; this tentative
+        // index needs to be compared with the filter member, and if it is >= the filter then
+        // from that index the bitset will be considered for the filtering process.
+        if (ByteUtil
+            .compare(filterValues[i], dimensionColumnDataChunk.getChunkData(columnIndex[start]))
+            >= 0) {
+          start = start + 1;
+        }
+      }
+      last = start;
+      for (int j = start; j < numerOfRows; j++) {
+
+        bitSet.set(columnIndex[j]);
+        last++;
+      }
+      startIndex = last;
+      if (startIndex >= numerOfRows) {
+        break;
+      }
+    }
+    return bitSet;
+  }
+
+  /**
+   * Method will scan the block and find the range start index from which all
+   * members will be considered for applying range filters. This method will
+   * be called if the column is sorted by default, so no column index
+   * mapping is needed for accessing the members from the block.
+   *
+   * @param dimensionColumnDataChunk
+   * @param numerOfRows
+   * @return BitSet.
+   */
+  private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
+      int numerOfRows) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
+      int start = 0;
+      int last = 0;
+      int startIndex = 0;
+      byte[][] filterValues = this.filterRangeValues;
+      for (int k = 0; k < filterValues.length; k++) {
+        start = CarbonUtil.getFirstIndexUsingBinarySearch(
+            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+            filterValues[k], false);
+        if (start < 0) {
+          start = -(start + 1);
+          if (start == numerOfRows) {
+            start = start - 1;
+          }
+          // Method will compare the tentative index value after binary search; this tentative
+          // index needs to be compared with the filter member, and if it is >= the filter then
+          // from that index the bitset will be considered for the filtering process.
+          if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start))
+              >= 0) {
+            start = start + 1;
+          }
+        }
+
+        last = start;
+        for (int j = start; j < numerOfRows; j++) {
+          bitSet.set(j);
+          last++;
+        }
+        startIndex = last;
+        if (startIndex >= numerOfRows) {
+          break;
+        }
+      }
+    }
+    return bitSet;
+  }
+}
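
The isScanRequired checks above prune whole blocks from min/max metadata: for a greater-than-or-equal filter, a block can only contain matches when the filter value compares less than or equal to the block max. A hypothetical standalone sketch, where Arrays.compare (Java 9+, signed lexicographic order) stands in for ByteUtil.UnsafeComparer:

    import java.util.Arrays;

    public class MinMaxPruneDemo {
      static boolean isScanRequired(byte[] filterValue, byte[] blockMax) {
        // lexicographic byte comparison, like ByteUtil.UnsafeComparer.compareTo
        return Arrays.compare(filterValue, blockMax) <= 0;  // filter <= max
      }

      public static void main(String[] args) {
        System.out.println(isScanRequired(new byte[] { 5 }, new byte[] { 9 }));  // true
        System.out.println(isScanRequired(new byte[] { 9 }, new byte[] { 5 }));  // false
      }
    }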

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
new file mode 100644
index 0000000..367ebbd
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
@@ -0,0 +1,248 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import java.util.BitSet;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
+import org.apache.carbondata.core.util.ByteUtil;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.FilterUtil;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+public class RowLevelRangeLessThanEqualFilterExecuterImpl extends RowLevelFilterExecuterImpl {
+  protected byte[][] filterRangeValues;
+
+  public RowLevelRangeLessThanEqualFilterExecuterImpl(
+      List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
+      List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
+      AbsoluteTableIdentifier tableIdentifier, byte[][] filterRangeValues,
+      SegmentProperties segmentProperties) {
+    super(dimColEvaluatorInfoList, msrColEvalutorInfoList, exp, tableIdentifier, segmentProperties,
+        null);
+    this.filterRangeValues = filterRangeValues;
+  }
+
+  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
+    BitSet bitSet = new BitSet(1);
+    byte[][] filterValues = this.filterRangeValues;
+    int columnIndex = this.dimColEvaluatorInfoList.get(0).getColumnIndex();
+    boolean isScanRequired = false;
+    for (int k = 0; k < filterValues.length; k++) {
+      // for the block to qualify, (filter - min) should be non-negative
+      int minCompare =
+          ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blockMinValue[columnIndex]);
+
+      // if any applied filter value is not within the block's min and max,
+      // then since it is a less-than-or-equal filter, validate whether the
+      // block min is less than or equal to the applied filter member
+      if (minCompare >= 0) {
+        isScanRequired = true;
+        break;
+      }
+    }
+    if (isScanRequired) {
+      bitSet.set(0);
+    }
+    return bitSet;
+
+  }
+
+  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
+      throws FilterUnsupportedException {
+    if (!dimColEvaluatorInfoList.get(0).getDimension().hasEncoding(Encoding.DICTIONARY)) {
+      return super.applyFilter(blockChunkHolder);
+    }
+    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
+        .get(dimColEvaluatorInfoList.get(0).getColumnIndex());
+    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
+      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
+          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
+    }
+    return getFilteredIndexes(blockChunkHolder.getDimensionDataChunk()[blockIndex],
+        blockChunkHolder.getDataBlock().nodeSize());
+  }
+
+  private BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
+      int numerOfRows) {
+    byte[] defaultValue = null;
+    if (dimColEvaluatorInfoList.get(0).getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+      DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
+          .getDirectDictionaryGenerator(
+              dimColEvaluatorInfoList.get(0).getDimension().getDataType());
+      int key = directDictionaryGenerator.generateDirectSurrogateKey(null) + 1;
+      defaultValue = FilterUtil.getMaskKey(key, dimColEvaluatorInfoList.get(0).getDimension(),
+          this.segmentProperties.getDimensionKeyGenerator());
+    }
+    if (null != dimensionColumnDataChunk.getAttributes().getInvertedIndexes()
+        && dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
+
+      return setFilterdIndexToBitSetWithColumnIndex(
+          (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows, defaultValue);
+
+    }
+    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows, defaultValue);
+  }
+
+  /**
+   * Method will scan the block and find the range start index from which all members
+   * will be considered for applying range filters. This method will be called if the
+   * column is not sorted by default, so a column index mapping will be present for
+   * accessing the members from the block.
+   *
+   * @param dimensionColumnDataChunk
+   * @param numerOfRows
+   * @return BitSet.
+   */
+  private BitSet setFilterdIndexToBitSetWithColumnIndex(
+      FixedLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows,
+      byte[] defaultValue) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    int[] columnIndex = dimensionColumnDataChunk.getAttributes().getInvertedIndexes();
+    int start = 0;
+    int last = 0;
+    int skip = 0;
+    int startIndex = 0;
+    byte[][] filterValues = this.filterRangeValues;
+    // find the number of default values to skip the null value in case of direct dictionary
+    if (null != defaultValue) {
+      start = CarbonUtil
+          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+              defaultValue, true);
+      if (start < 0) {
+        skip = -(start + 1);
+        // end of block
+        if (skip == numerOfRows) {
+          return bitSet;
+        }
+      } else {
+        skip = start;
+      }
+      startIndex = skip;
+    }
+    for (int i = 0; i < filterValues.length; i++) {
+      start = CarbonUtil
+          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+              filterValues[i], true);
+      if (start < 0) {
+        start = -(start + 1);
+        if (start == numerOfRows) {
+          start = start - 1;
+        }
+        // Method will compare the tentative index value after binary search; this tentative
+        // index needs to be compared with the filter member, and if it is <= the filter then
+        // from that index the bitset will be considered for the filtering process.
+        if (ByteUtil
+            .compare(filterValues[i], dimensionColumnDataChunk.getChunkData(columnIndex[start]))
+            <= 0) {
+          start = start - 1;
+        }
+      }
+      last = start;
+      for (int j = start; j >= skip; j--) {
+        bitSet.set(columnIndex[j]);
+        last--;
+      }
+      startIndex = last;
+      if (startIndex <= 0) {
+        break;
+      }
+    }
+    return bitSet;
+  }
+
+  /**
+   * Method will scan the block and find the range start index from which all
+   * members will be considered for applying range filters. This method will
+   * be called if the column is sorted by default, so no column index
+   * mapping is needed for accessing the members from the block.
+   *
+   * @param dimensionColumnDataChunk
+   * @param numerOfRows
+   * @param defaultValue
+   * @return BitSet.
+   */
+  private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
+      int numerOfRows, byte[] defaultValue) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
+      int start = 0;
+      int last = 0;
+      int startIndex = 0;
+      byte[][] filterValues = this.filterRangeValues;
+      int skip = 0;
+      // find the number of default values to skip the null value in case of direct dictionary
+      if (null != defaultValue) {
+        start = CarbonUtil.getFirstIndexUsingBinarySearch(
+            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+            defaultValue, true);
+        if (start < 0) {
+          skip = -(start + 1);
+          // end of block
+          if (skip == numerOfRows) {
+            return bitSet;
+          }
+        } else {
+          skip = start;
+        }
+        startIndex = skip;
+      }
+      for (int k = 0; k < filterValues.length; k++) {
+        start = CarbonUtil.getFirstIndexUsingBinarySearch(
+            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+            filterValues[k], true);
+        if (start < 0) {
+          start = -(start + 1);
+          if (start == numerOfRows) {
+            start = start - 1;
+          }
+          // Method will compare the tentative index value after binary search; this tentative
+          // index needs to be compared with the filter member, and if it is <= the filter then
+          // from that index the bitset will be considered for the filtering process.
+          if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start))
+              <= 0) {
+            start = start - 1;
+          }
+        }
+        last = start;
+        for (int j = start; j >= skip; j--) {
+          bitSet.set(j);
+          last--;
+        }
+        startIndex = last;
+        if (startIndex <= 0) {
+          break;
+        }
+      }
+    }
+    return bitSet;
+  }
+
+}
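
The less-than executers first compute a skip offset so that null rows of a direct-dictionary column, which sort first under a reserved default key, never enter the result. A hypothetical standalone sketch of that skip computation, assuming surrogate 1 is the reserved null key and real values start at 2:

    import java.util.Arrays;

    public class NullSkipDemo {
      public static void main(String[] args) {
        // surrogate 1 is assumed reserved for null; real values start at 2
        int[] sortedSurrogates = { 1, 1, 2, 3, 5 };
        int start = Arrays.binarySearch(sortedSurrogates, 2);
        int skip = start < 0 ? -(start + 1) : start;  // first non-null slot
        System.out.println("first non-null row index: " + skip);  // 2
      }
    }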

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
new file mode 100644
index 0000000..af7f135
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import java.util.BitSet;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
+import org.apache.carbondata.core.util.ByteUtil;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.FilterUtil;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+public class RowLevelRangeLessThanFiterExecuterImpl extends RowLevelFilterExecuterImpl {
+  private byte[][] filterRangeValues;
+
+  public RowLevelRangeLessThanFiterExecuterImpl(
+      List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
+      List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
+      AbsoluteTableIdentifier tableIdentifier, byte[][] filterRangeValues,
+      SegmentProperties segmentProperties) {
+    super(dimColEvaluatorInfoList, msrColEvalutorInfoList, exp, tableIdentifier, segmentProperties,
+        null);
+    this.filterRangeValues = filterRangeValues;
+  }
+
+  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
+    BitSet bitSet = new BitSet(1);
+    byte[][] filterValues = this.filterRangeValues;
+    int columnIndex = this.dimColEvaluatorInfoList.get(0).getColumnIndex();
+    boolean isScanRequired = false;
+    for (int k = 0; k < filterValues.length; k++) {
+      // for the block to qualify, (filter - min) should be positive
+      int minCompare =
+          ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blockMinValue[columnIndex]);
+
+      // if any applied filter value is not within the block's min and max,
+      // then since it is a less-than filter, validate whether the
+      // block min is strictly less than the applied filter member
+      if (minCompare > 0) {
+        isScanRequired = true;
+        break;
+      }
+    }
+    if (isScanRequired) {
+      bitSet.set(0);
+    }
+    return bitSet;
+
+  }
+
+  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
+      throws FilterUnsupportedException {
+    if (!dimColEvaluatorInfoList.get(0).getDimension().hasEncoding(Encoding.DICTIONARY)) {
+      return super.applyFilter(blockChunkHolder);
+    }
+    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
+        .get(dimColEvaluatorInfoList.get(0).getColumnIndex());
+    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
+      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
+          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
+    }
+    return getFilteredIndexes(blockChunkHolder.getDimensionDataChunk()[blockIndex],
+        blockChunkHolder.getDataBlock().nodeSize());
+  }
+
+  private BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
+      int numerOfRows) {
+    byte[] defaultValue = null;
+    if (dimColEvaluatorInfoList.get(0).getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+      DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
+          .getDirectDictionaryGenerator(
+              dimColEvaluatorInfoList.get(0).getDimension().getDataType());
+      int key = directDictionaryGenerator.generateDirectSurrogateKey(null) + 1;
+      defaultValue = FilterUtil.getMaskKey(key, dimColEvaluatorInfoList.get(0).getDimension(),
+          this.segmentProperties.getDimensionKeyGenerator());
+    }
+    if (null != dimensionColumnDataChunk.getAttributes().getInvertedIndexes()
+        && dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
+      return setFilterdIndexToBitSetWithColumnIndex(
+          (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows, defaultValue);
+    }
+    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows, defaultValue);
+  }
+
+  /**
+   * Method will scan the block and find the range start index from which all members
+   * will be considered for applying range filters. This method will be called if the
+   * column is not sorted by default, so a column index mapping will be present for
+   * accessing the members from the block.
+   *
+   * @param dimensionColumnDataChunk
+   * @param numerOfRows
+   * @return BitSet.
+   */
+  private BitSet setFilterdIndexToBitSetWithColumnIndex(
+      FixedLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows,
+      byte[] defaultValue) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    int[] columnIndex = dimensionColumnDataChunk.getAttributes().getInvertedIndexes();
+    int start = 0;
+    int last = 0;
+    int startIndex = 0;
+    int skip = 0;
+    byte[][] filterValues = this.filterRangeValues;
+
+    // find the number of default values to skip the null value in case of direct dictionary
+    if (null != defaultValue) {
+      start = CarbonUtil
+          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+              defaultValue, false);
+      if (start < 0) {
+        skip = -(start + 1);
+        // end of block
+        if (skip == numerOfRows) {
+          return bitSet;
+        }
+      } else {
+        skip = start;
+      }
+      startIndex = skip;
+    }
+
+    for (int i = 0; i < filterValues.length; i++) {
+      start = CarbonUtil
+          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+              filterValues[i], false);
+      // Logic handles the case where the range filter member is not present in the block:
+      // the binary search returns the index from which the bits will be set in order to
+      // apply the filter. Since this is a less-than filter, the range is taken from the
+      // previous element, which is lesser than the filter member.
+      start = CarbonUtil.nextLesserValueToTarget(start, dimensionColumnDataChunk, filterValues[i]);
+      if (start < 0) {
+        start = -(start + 1);
+        if (start == numerOfRows) {
+          start = start - 1;
+        }
+        // Compare the tentative index value found by the binary search with the filter
+        // member; if it is less than the filter, the bitset is considered for the
+        // filtering process from that index onwards.
+        if (ByteUtil
+            .compare(filterValues[i], dimensionColumnDataChunk.getChunkData(columnIndex[start]))
+            < 0) {
+          start = start - 1;
+        }
+      }
+      last = start;
+      for (int j = start; j >= skip; j--) {
+        bitSet.set(columnIndex[j]);
+        last--;
+      }
+      startIndex = last;
+      if (startIndex >= 0) {
+        break;
+      }
+    }
+    return bitSet;
+  }
+
+  /**
+   * Method will scan the block and find the range start index from which all
+   * members will be considered for applying range filters. This method will
+   * be called when the column data is sorted by default, so no column index
+   * mapping is needed for accessing the members of the block.
+   *
+   * @param dimensionColumnDataChunk
+   * @param numerOfRows
+   * @return BitSet.
+   */
+  private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
+      int numerOfRows, byte[] defaultValue) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
+      int start = 0;
+      int last = 0;
+      int startIndex = 0;
+      int skip = 0;
+      byte[][] filterValues = this.filterRangeValues;
+      // find the number of default values, to skip the null values in case of direct dictionary
+      if (null != defaultValue) {
+        start = CarbonUtil.getFirstIndexUsingBinarySearch(
+            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+            defaultValue, false);
+        if (start < 0) {
+          skip = -(start + 1);
+          // end of block
+          if (skip == numerOfRows) {
+            return bitSet;
+          }
+        } else {
+          skip = start;
+        }
+        startIndex = skip;
+      }
+      for (int k = 0; k < filterValues.length; k++) {
+        start = CarbonUtil.getFirstIndexUsingBinarySearch(
+            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, startIndex, numerOfRows - 1,
+            filterValues[k], false);
+        start = CarbonUtil.nextLesserValueToTarget(start,
+            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, filterValues[k]);
+        if (start < 0) {
+          start = -(start + 1);
+          if (start >= numerOfRows) {
+            start = numerOfRows - 1;
+          }
+          // Compare the tentative index value found by the binary search with the filter
+          // member; if it is less than the filter, the bitset is considered for the
+          // filtering process from that index onwards.
+          if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start)) < 0) {
+            start = start - 1;
+          }
+        }
+        last = start;
+        for (int j = start; j >= skip; j--) {
+          bitSet.set(j);
+          last--;
+        }
+        startIndex = last;
+        if (startIndex <= 0) {
+          break;
+        }
+      }
+    }
+    return bitSet;
+  }
+}
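
The executer above layers inverted indexes, direct-dictionary null skipping and the
CarbonUtil binary-search helpers over one core idea: binary-search the sorted chunk for
the filter boundary and set every bit before it. A minimal, self-contained sketch of
that idea (hypothetical class and method names, not the CarbonData API):

import java.util.BitSet;

public class LessThanFilterSketch {

  // Sets a bit for every row whose value sorts strictly before the filter
  // value. Rows are assumed stored in ascending byte order, mirroring
  // setFilterdIndexToBitSet above.
  static BitSet lessThan(byte[][] sortedRows, byte[] filterValue) {
    BitSet bitSet = new BitSet(sortedRows.length);
    int low = 0;
    int high = sortedRows.length;
    // binary search for the first row >= filterValue
    while (low < high) {
      int mid = (low + high) >>> 1;
      if (compareUnsigned(sortedRows[mid], filterValue) < 0) {
        low = mid + 1;
      } else {
        high = mid;
      }
    }
    // every row before 'low' satisfies the less-than filter
    bitSet.set(0, low);
    return bitSet;
  }

  // unsigned lexicographic byte comparison, in the spirit of ByteUtil.compare
  static int compareUnsigned(byte[] a, byte[] b) {
    for (int i = 0; i < Math.min(a.length, b.length); i++) {
      int x = a[i] & 0xff;
      int y = b[i] & 0xff;
      if (x != y) {
        return x - y;
      }
    }
    return a.length - b.length;
  }

  public static void main(String[] args) {
    byte[][] rows = { { 1 }, { 3 }, { 5 }, { 7 } };
    System.out.println(lessThan(rows, new byte[] { 5 })); // prints {0, 1}
  }
}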

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeTypeExecuterFacory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeTypeExecuterFacory.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeTypeExecuterFacory.java
new file mode 100644
index 0000000..4c46f6d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/RowLevelRangeTypeExecuterFacory.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.scan.filter.resolver.FilterResolverIntf;
+import org.apache.carbondata.scan.filter.resolver.RowLevelRangeFilterResolverImpl;
+
+public class RowLevelRangeTypeExecuterFacory {
+
+  private RowLevelRangeTypeExecuterFacory() {
+
+  }
+
+  /**
+   * The method returns the row level range filter executer instance based on
+   * the filter tree resolver type.
+   *
+   * @param filterExecuterType
+   * @param filterExpressionResolverTree
+   * @param segmentProperties
+   * @return the executer instance
+   */
+  public static RowLevelFilterExecuterImpl getRowLevelRangeTypeExecuter(
+      FilterExecuterType filterExecuterType, FilterResolverIntf filterExpressionResolverTree,
+      SegmentProperties segmentProperties) {
+    switch (filterExecuterType) {
+
+      case ROWLEVEL_LESSTHAN:
+        return new RowLevelRangeLessThanFiterExecuterImpl(
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
+                .getDimColEvaluatorInfoList(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
+                .getMsrColEvalutorInfoList(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getFilterExpression(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
+                .getFilterRangeValues(segmentProperties), segmentProperties);
+      case ROWLEVEL_LESSTHAN_EQUALTO:
+        return new RowLevelRangeLessThanEqualFilterExecuterImpl(
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
+                .getDimColEvaluatorInfoList(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
+                .getMsrColEvalutorInfoList(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getFilterExpression(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
+                .getFilterRangeValues(segmentProperties), segmentProperties);
+      case ROWLEVEL_GREATERTHAN_EQUALTO:
+        return new RowLevelRangeGrtrThanEquaToFilterExecuterImpl(
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
+                .getDimColEvaluatorInfoList(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
+                .getMsrColEvalutorInfoList(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getFilterExpression(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
+                .getFilterRangeValues(segmentProperties), segmentProperties);
+      case ROWLEVEL_GREATERTHAN:
+        return new RowLevelRangeGrtThanFiterExecuterImpl(
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
+                .getDimColEvaluatorInfoList(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
+                .getMsrColEvalutorInfoList(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getFilterExpression(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
+            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
+                .getFilterRangeValues(segmentProperties), segmentProperties);
+      default:
+        // this scenario should not occur; upstream logic must prevent it
+        return null;
+
+    }
+  }
+
+}
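
A caller is expected to hold an already-resolved filter tree; a hedged usage sketch, in
which resolver, segmentProperties and blockChunkHolder are assumed to come from the
surrounding query execution rather than being constructed here:

// dispatch on the resolved filter type, then apply the executer per block
RowLevelFilterExecuterImpl executer =
    RowLevelRangeTypeExecuterFacory.getRowLevelRangeTypeExecuter(
        FilterExecuterType.ROWLEVEL_LESSTHAN, resolver, segmentProperties);
BitSet matchingRows = executer.applyFilter(blockChunkHolder); // throws FilterUnsupportedException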



http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/KeyGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/KeyGenerator.java b/core/src/main/java/org/carbondata/core/keygenerator/KeyGenerator.java
deleted file mode 100644
index f9016f8..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/KeyGenerator.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.keygenerator;
-
-import java.io.Serializable;
-import java.util.Comparator;
-
-/**
- * It generates the key by using multiple keys(typically multiple dimension keys
- * are combined to form a single key). And it can return the individual
- * key(dimensional key) out of combined key.
- */
-public interface KeyGenerator extends Serializable, Comparator<byte[]> {
-  /**
-   * It generates the single key aka byte array from multiple keys.
-   *
-   * @param keys
-   * @return byte array
-   * @throws KeyGenException
-   */
-  byte[] generateKey(long[] keys) throws KeyGenException;
-
-  /**
-   * It generates the single key aka byte array from multiple keys.
-   *
-   * @param keys
-   * @return
-   * @throws KeyGenException
-   */
-  byte[] generateKey(int[] keys) throws KeyGenException;
-
-  /**
-   * It gets array of keys out of single key aka byte array
-   *
-   * @param key
-   * @return array of keys.
-   */
-  long[] getKeyArray(byte[] key);
-
-  /**
-   * It gets array of keys out of single key aka byte array
-   *
-   * @param key
-   * @param offset
-   * @return array of keys.
-   */
-  long[] getKeyArray(byte[] key, int offset);
-
-  /**
-   * It gets array of keys out of single key aka byte array
-   *
-   * @param key
-   * @param maskedByteRanges
-   * @return array of keys
-   */
-  long[] getKeyArray(byte[] key, int[] maskedByteRanges);
-
-  /**
-   * It gets the key at the specified index from the single key aka byte array
-   *
-   * @param key
-   * @param index of key.
-   * @return key
-   */
-  long getKey(byte[] key, int index);
-
-  /**
-   * Set any extra properties if required.
-   */
-  void setProperty(Object key, Object value);
-
-  /**
-   * Gives the key size in number of bytes.
-   */
-  int getKeySizeInBytes();
-
-  /**
-   * It gets the specified index and size from the single key aka byte array
-   *
-   * @param key
-   * @param index
-   * @param size
-   * @return
-   */
-  long[] getSubKeyArray(byte[] key, int index, int size);
-
-  /**
-   * returns key bytes offset
-   *
-   * @param index
-   * @return
-   */
-  int[] getKeyByteOffsets(int index);
-
-  int compare(byte[] key1, int offset1, int length1, byte[] key2, int offset2, int length2);
-
-  /**
-   * returns the dimension count
-   *
-   * @return
-   */
-  int getDimCount();
-
-  int getStartAndEndKeySizeWithOnlyPrimitives();
-
-  void setStartAndEndKeySizeWithOnlyPrimitives(int startAndEndKeySizeWithPrimitives);
-}
\ No newline at end of file
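
The contract is a round trip between dimension surrogates and one packed byte array; a
hedged sketch against the interface as declared, using the MultiDimKeyVarLengthGenerator
implementation referenced elsewhere in this commit (bit lengths are illustrative):

int[] bitLengths = new int[] { 8, 8, 16 }; // illustrative width per dimension
KeyGenerator generator = new MultiDimKeyVarLengthGenerator(bitLengths);
try {
  byte[] mdKey = generator.generateKey(new long[] { 3, 42, 7 }); // pack three surrogates
  long[] unpacked = generator.getKeyArray(mdKey);                // -> {3, 42, 7}
} catch (KeyGenException e) {
  // key generation failed, e.g. input does not fit the configured widths
}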

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/columnar/ColumnarSplitter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/columnar/ColumnarSplitter.java b/core/src/main/java/org/carbondata/core/keygenerator/columnar/ColumnarSplitter.java
deleted file mode 100644
index c740cef..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/columnar/ColumnarSplitter.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.keygenerator.columnar;
-
-import org.carbondata.core.keygenerator.KeyGenException;
-
-/**
- * Splits the odometer key into columns. Further, these columns can be stored in a columnar storage.
- */
-public interface ColumnarSplitter {
-  /**
-   * Splits generated MDKey to multiple columns.
-   *
-   * @param key MDKey
-   * @return Multiple columns in 2 dimensional byte array
-   */
-  byte[][] splitKey(byte[] key);
-
-  /**
-   * It generates and splits key to multiple columns
-   *
-   * @param keys
-   * @return
-   * @throws KeyGenException
-   */
-  byte[][] generateAndSplitKey(long[] keys) throws KeyGenException;
-
-  /**
-   * It generates and splits key to multiple columns
-   *
-   * @param keys
-   * @return
-   * @throws KeyGenException
-   */
-  byte[][] generateAndSplitKey(int[] keys) throws KeyGenException;
-
-  /**
-   * Takes the split keys and generates the surrogate key array
-   *
-   * @param key
-   * @return
-   */
-  long[] getKeyArray(byte[][] key);
-
-  /**
-   * Takes the split keys and generates the surrogate key array in bytes
-   *
-   * @param key
-   * @return
-   */
-  byte[] getKeyByteArray(byte[][] key);
-
-  /**
-   * Takes the split keys and generates the surrogate key array in bytes
-   *
-   * @param key
-   * @param columnIndexes, takes columnIndexes to consider which columns are present in the key
-   * @return
-   */
-  byte[] getKeyByteArray(byte[][] key, int[] columnIndexes);
-
-  /**
-   * Takes the split keys and generates the surrogate key array
-   *
-   * @param key
-   * @param columnIndexes, takes columnIndexes to consider which columns are present in the key
-   * @return
-   */
-  long[] getKeyArray(byte[][] key, int[] columnIndexes);
-
-  /**
-   * Below method will be used to get the block size
-   *
-   * @return
-   */
-  int[] getBlockKeySize();
-
-  /**
-   * Below method will be used to get the total key Size of the particular block
-   *
-   * @param blockIndexes
-   * @return
-   */
-  int getKeySizeByBlock(int[] blockIndexes);
-
-}
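
splitKey is effectively the inverse of concatenating the per-block byte arrays; a hedged
sketch of the contract using the equi-split implementation shown below (bit lengths and
surrogate values are illustrative, and generateAndSplitKey throws KeyGenException):

ColumnarSplitter splitter =
    new MultiDimKeyVarLengthEquiSplitGenerator(new int[] { 8, 8, 16 }, (byte) 1);
byte[][] columns = splitter.generateAndSplitKey(new long[] { 3, 42, 7 });
byte[] wholeKey = splitter.getKeyByteArray(columns); // blocks concatenated back together
long[] surrogates = splitter.getKeyArray(columns);   // -> {3, 42, 7}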

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthEquiSplitGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthEquiSplitGenerator.java b/core/src/main/java/org/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthEquiSplitGenerator.java
deleted file mode 100644
index 074cea6..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthEquiSplitGenerator.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.keygenerator.columnar.impl;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.keygenerator.KeyGenException;
-import org.carbondata.core.keygenerator.columnar.ColumnarSplitter;
-import org.carbondata.core.keygenerator.mdkey.MultiDimKeyVarLengthGenerator;
-
-/**
- * Equi-split implementation of the columnar splitter, using the var key length
- * generator to generate keys.
- * The split depends on the dimensionsToSplit parameter, which decides how many
- * dimensions should be present in each column.
- */
-public class MultiDimKeyVarLengthEquiSplitGenerator extends MultiDimKeyVarLengthGenerator
-    implements ColumnarSplitter {
-
-  /**
-   *
-   */
-  private static final long serialVersionUID = -7767757692821917570L;
-
-  private byte dimensionsToSplit;
-
-  private int[][] splitDimArray;
-
-  private int[][] dimBlockArray;
-
-  private int[][][] byteRangesForDims;
-
-  private int[] blockKeySize;
-
-  public MultiDimKeyVarLengthEquiSplitGenerator(int[] lens, byte dimensionsToSplit) {
-    super(lens);
-    this.dimensionsToSplit = dimensionsToSplit;
-    intialize();
-  }
-
-  private void intialize() {
-    byte s = 0;
-    List<Set<Integer>> splitList =
-        new ArrayList<Set<Integer>>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-    Set<Integer> split = new TreeSet<Integer>();
-    splitList.add(split);
-    for (int i = 0; i < byteRangesForKeys.length; i++) {
-      if (s == dimensionsToSplit) {
-        s = 0;
-        split = new TreeSet<Integer>();
-        splitList.add(split);
-      }
-      for (int j = 0; j < byteRangesForKeys[i].length; j++) {
-        for (int j2 = byteRangesForKeys[i][0]; j2 <= byteRangesForKeys[i][1]; j2++) {
-          split.add(j2);
-        }
-      }
-      s++;
-    }
-    List<Integer>[] splits = new List[splitList.size()];
-    int i = 0;
-    for (Set<Integer> splitLocal : splitList) {
-      List<Integer> range = new ArrayList<Integer>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-      for (Integer index : splitLocal) {
-        range.add(index);
-      }
-      splits[i++] = range;
-    }
-    for (int j = 1; j < splits.length; j++) {
-      if (splits[j - 1].get(splits[j - 1].size() - 1) == splits[j].get(0)) {
-        splits[j].remove(0);
-      }
-    }
-    splitDimArray = new int[splits.length][];
-    for (int j = 0; j < splits.length; j++) {
-      int[] a = convertToArray(splits[j]);
-      splitDimArray[j] = a.length > 0 ? new int[] { a[0], a[a.length - 1] } : a;
-    }
-
-    dimBlockArray = new int[byteRangesForKeys.length][];
-    Set<Integer>[] dimBlockSet = new Set[dimBlockArray.length];
-    for (int k = 0; k < byteRangesForKeys.length; k++) {
-      int[] dimRange = byteRangesForKeys[k];
-      Set<Integer> dimBlockPosSet = new TreeSet<Integer>();
-      dimBlockSet[k] = dimBlockPosSet;
-      for (int j = 0; j < splitDimArray.length; j++) {
-        if (dimRange[0] >= splitDimArray[j][0] && dimRange[0] <= splitDimArray[j][1]) {
-          dimBlockPosSet.add(j);
-        }
-        if (dimRange[1] >= splitDimArray[j][0] && dimRange[1] <= splitDimArray[j][1]) {
-          dimBlockPosSet.add(j);
-        }
-      }
-
-    }
-
-    for (int j = 0; j < dimBlockSet.length; j++) {
-      dimBlockArray[j] = convertToArray(dimBlockSet[j]);
-    }
-
-    int[][] splitDimArrayLocalIndexes = new int[splitDimArray.length][];
-    for (int j = 0; j < splitDimArrayLocalIndexes.length; j++) {
-      splitDimArrayLocalIndexes[j] = splitDimArray[j].length > 0 ?
-          new int[] { 0, splitDimArray[j][1] - splitDimArray[j][0] } :
-          new int[0];
-    }
-
-    byteRangesForDims = new int[byteRangesForKeys.length][][];
-    for (int j = 0; j < byteRangesForKeys.length; j++) {
-      if (dimBlockArray[j].length > 1) {
-        int[] bArray1 = splitDimArrayLocalIndexes[dimBlockArray[j][0]];
-        byteRangesForDims[j] = new int[2][2];
-        byteRangesForDims[j][0] =
-            new int[] { bArray1[bArray1.length - 1], bArray1[bArray1.length - 1] };
-        byteRangesForDims[j][1] = new int[] { 0,
-            (byteRangesForKeys[j][byteRangesForKeys[j].length - 1] - byteRangesForKeys[j][0]) - 1 };
-      } else {
-        byteRangesForDims[j] = new int[1][1];
-        int[] bArray1 = splitDimArray[dimBlockArray[j][0]];
-        byteRangesForDims[j][0] = new int[] { byteRangesForKeys[j][0] - bArray1[0],
-            byteRangesForKeys[j][1] - bArray1[0] };
-      }
-    }
-    blockKeySize = new int[splitDimArray.length];
-
-    for (int j = 0; j < blockKeySize.length; j++) {
-      blockKeySize[j] =
-          splitDimArray[j].length > 0 ? splitDimArray[j][1] - splitDimArray[j][0] + 1 : 0;
-    }
-  }
-
-  private int[] convertToArray(List<Integer> list) {
-    int[] ints = new int[list.size()];
-    for (int i = 0; i < ints.length; i++) {
-      ints[i] = list.get(i);
-    }
-    return ints;
-  }
-
-  private int[] convertToArray(Set<Integer> set) {
-    int[] ints = new int[set.size()];
-    int i = 0;
-    for (Iterator iterator = set.iterator(); iterator.hasNext(); ) {
-      ints[i++] = (Integer) iterator.next();
-    }
-    return ints;
-  }
-
-  @Override public byte[][] splitKey(byte[] key) {
-    byte[][] split = new byte[blockKeySize.length][];
-    int copyIndex = 0;
-    for (int i = 0; i < split.length; i++) {
-      split[i] = new byte[blockKeySize[i]];
-      System.arraycopy(key, copyIndex, split[i], 0, split[i].length);
-      copyIndex += blockKeySize[i];
-    }
-    return split;
-  }
-
-  @Override public byte[][] generateAndSplitKey(long[] keys) throws KeyGenException {
-    return splitKey(generateKey(keys));
-  }
-
-  @Override public byte[][] generateAndSplitKey(int[] keys) throws KeyGenException {
-    return splitKey(generateKey(keys));
-  }
-
-  @Override public long[] getKeyArray(byte[][] key) {
-    byte[] fullKey = new byte[getKeySizeInBytes()];
-    int copyIndex = 0;
-    for (int i = 0; i < key.length; i++) {
-      System.arraycopy(key[i], 0, fullKey, copyIndex, key[i].length);
-      copyIndex += key[i].length;
-    }
-    return getKeyArray(fullKey);
-  }
-
-  @Override public byte[] getKeyByteArray(byte[][] key) {
-    byte[] fullKey = new byte[getKeySizeInBytes()];
-    int copyIndex = 0;
-    for (int i = 0; i < key.length; i++) {
-      System.arraycopy(key[i], 0, fullKey, copyIndex, key[i].length);
-      copyIndex += key[i].length;
-    }
-    return fullKey;
-  }
-
-  @Override public byte[] getKeyByteArray(byte[][] key, int[] columnIndexes) {
-    return null;
-  }
-
-  @Override public long[] getKeyArray(byte[][] key, int[] columnIndexes) {
-    return null;
-  }
-
-  public int[] getBlockKeySize() {
-    return blockKeySize;
-  }
-
-  @Override public int getKeySizeByBlock(int[] blockIndexes) {
-    int size = 0;
-
-    for (int i = 0; i < blockIndexes.length; i++) {
-      if (blockIndexes[i] < blockKeySize.length) {
-        size += blockKeySize[blockIndexes[i]];
-      }
-    }
-    return size;
-  }
-
-  @Override public boolean equals(Object obj) {
-    if(!(obj instanceof MultiDimKeyVarLengthEquiSplitGenerator)) {
-      return false;
-    }
-    MultiDimKeyVarLengthEquiSplitGenerator o = (MultiDimKeyVarLengthEquiSplitGenerator)obj;
-    return o.dimensionsToSplit == dimensionsToSplit && super.equals(obj);
-  }
-
-  @Override public int hashCode() {
-    return super.hashCode() + dimensionsToSplit;
-  }
-}
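
With dimensionsToSplit = 1 every dimension lands in its own block; a hedged illustration
of the resulting block sizes (the exact values depend on the bit-length to byte-range
mapping of the parent MultiDimKeyVarLengthGenerator):

MultiDimKeyVarLengthEquiSplitGenerator generator =
    new MultiDimKeyVarLengthEquiSplitGenerator(new int[] { 8, 8, 16 }, (byte) 1);
// one block per dimension; for these lens, expected sizes are 1, 1 and 2 bytes
System.out.println(java.util.Arrays.toString(generator.getBlockKeySize()));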

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthVariableSplitGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthVariableSplitGenerator.java b/core/src/main/java/org/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthVariableSplitGenerator.java
deleted file mode 100644
index 83c119f..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthVariableSplitGenerator.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.keygenerator.columnar.impl;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.keygenerator.KeyGenException;
-import org.carbondata.core.keygenerator.columnar.ColumnarSplitter;
-import org.carbondata.core.keygenerator.mdkey.MultiDimKeyVarLengthGenerator;
-
-public class MultiDimKeyVarLengthVariableSplitGenerator extends MultiDimKeyVarLengthGenerator
-    implements ColumnarSplitter {
-
-  /**
-   *
-   */
-  private static final long serialVersionUID = 1L;
-
-  private int[] dimensionsToSplit;
-
-  private int[] blockKeySize;
-
-  public MultiDimKeyVarLengthVariableSplitGenerator(int[] lens, int[] dimSplit) {
-    super(lens);
-    this.dimensionsToSplit = dimSplit;
-    initialise();
-
-  }
-
-  private void initialise() {
-    int s = 0;
-    List<Set<Integer>> splitList =
-        new ArrayList<Set<Integer>>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-    Set<Integer> split = new TreeSet<Integer>();
-    splitList.add(split);
-    int dimSplitIndx = 0;
-
-    for (int i = 0; i < byteRangesForKeys.length; i++) {
-      if (s == dimensionsToSplit[dimSplitIndx]) {
-        s = 0;
-        split = new TreeSet<Integer>();
-        splitList.add(split);
-        dimSplitIndx++;
-      }
-      for (int j = 0; j < byteRangesForKeys[i].length; j++) {
-        for (int j2 = byteRangesForKeys[i][0]; j2 <= byteRangesForKeys[i][1]; j2++) {
-          split.add(j2);
-        }
-      }
-      s++;
-
-    }
-    List<Integer>[] splits = new List[splitList.size()];
-    int i = 0;
-    for (Set<Integer> splitLocal : splitList) {
-      List<Integer> range = new ArrayList<Integer>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-      for (Integer index : splitLocal) {
-        range.add(index);
-      }
-      splits[i++] = range;
-    }
-    for (int j = 1; j < splits.length; j++) {
-      if (splits[j - 1].get(splits[j - 1].size() - 1) == splits[j].get(0)) {
-        splits[j].remove(0);
-      }
-    }
-    int[][] splitDimArray = new int[splits.length][];
-    for (int j = 0; j < splits.length; j++) {
-      int[] a = convertToArray(splits[j]);
-      splitDimArray[j] = a.length > 0 ? new int[] { a[0], a[a.length - 1] } : a;
-    }
-
-    int[][] dimBlockArray = new int[byteRangesForKeys.length][];
-    Set<Integer>[] dimBlockSet = new Set[dimBlockArray.length];
-    for (int k = 0; k < byteRangesForKeys.length; k++) {
-      int[] dimRange = byteRangesForKeys[k];
-      Set<Integer> dimBlockPosSet = new TreeSet<Integer>();
-      dimBlockSet[k] = dimBlockPosSet;
-      for (int j = 0; j < splitDimArray.length; j++) {
-        if (dimRange[0] >= splitDimArray[j][0] && dimRange[0] <= splitDimArray[j][1]) {
-          dimBlockPosSet.add(j);
-        }
-        if (dimRange[1] >= splitDimArray[j][0] && dimRange[1] <= splitDimArray[j][1]) {
-          dimBlockPosSet.add(j);
-        }
-      }
-
-    }
-
-    for (int j = 0; j < dimBlockSet.length; j++) {
-      dimBlockArray[j] = convertToArray(dimBlockSet[j]);
-    }
-
-    int[][] splitDimArrayLocalIndexes = new int[splitDimArray.length][];
-    for (int j = 0; j < splitDimArrayLocalIndexes.length; j++) {
-      splitDimArrayLocalIndexes[j] = splitDimArray[j].length > 0 ?
-          new int[] { 0, splitDimArray[j][1] - splitDimArray[j][0] } :
-          new int[0];
-    }
-
-    int[][][] byteRangesForDims = new int[byteRangesForKeys.length][][];
-    for (int j = 0; j < byteRangesForKeys.length; j++) {
-      if (dimBlockArray[j].length > 1) {
-        int[] bArray1 = splitDimArrayLocalIndexes[dimBlockArray[j][0]];
-        byteRangesForDims[j] = new int[2][2];
-        byteRangesForDims[j][0] =
-            new int[] { bArray1[bArray1.length - 1], bArray1[bArray1.length - 1] };
-        byteRangesForDims[j][1] = new int[] { 0,
-            (byteRangesForKeys[j][byteRangesForKeys[j].length - 1] - byteRangesForKeys[j][0]) - 1 };
-      } else {
-        byteRangesForDims[j] = new int[1][1];
-        int[] bArray1 = splitDimArray[dimBlockArray[j][0]];
-        byteRangesForDims[j][0] = new int[] { byteRangesForKeys[j][0] - bArray1[0],
-            byteRangesForKeys[j][1] - bArray1[0] };
-      }
-    }
-    blockKeySize = new int[splitDimArray.length];
-
-    for (int j = 0; j < blockKeySize.length; j++) {
-      blockKeySize[j] =
-          splitDimArray[j].length > 0 ? splitDimArray[j][1] - splitDimArray[j][0] + 1 : 0;
-    }
-
-  }
-
-  private int[] convertToArray(List<Integer> list) {
-    int[] ints = new int[list.size()];
-    for (int i = 0; i < ints.length; i++) {
-      ints[i] = list.get(i);
-    }
-    return ints;
-  }
-
-  private int[] convertToArray(Set<Integer> set) {
-    int[] ints = new int[set.size()];
-    int i = 0;
-    for (Iterator iterator = set.iterator(); iterator.hasNext(); ) {
-      ints[i++] = (Integer) iterator.next();
-    }
-    return ints;
-  }
-
-  @Override public byte[][] splitKey(byte[] key) {
-    byte[][] split = new byte[blockKeySize.length][];
-    int copyIndex = 0;
-    for (int i = 0; i < split.length; i++) {
-      split[i] = new byte[blockKeySize[i]];
-      System.arraycopy(key, copyIndex, split[i], 0, split[i].length);
-      copyIndex += blockKeySize[i];
-    }
-    return split;
-  }
-
-  @Override public byte[][] generateAndSplitKey(long[] keys) throws KeyGenException {
-    return splitKey(generateKey(keys));
-  }
-
-  @Override public byte[][] generateAndSplitKey(int[] keys) throws KeyGenException {
-    return splitKey(generateKey(keys));
-  }
-
-  @Override public long[] getKeyArray(byte[][] key) {
-    byte[] fullKey = new byte[getKeySizeInBytes()];
-    int copyIndex = 0;
-    for (int i = 0; i < key.length; i++) {
-      System.arraycopy(key[i], 0, fullKey, copyIndex, key[i].length);
-      copyIndex += key[i].length;
-    }
-    return getKeyArray(fullKey);
-  }
-
-  @Override public byte[] getKeyByteArray(byte[][] key) {
-    byte[] fullKey = new byte[getKeySizeInBytes()];
-    int copyIndex = 0;
-    for (int i = 0; i < key.length; i++) {
-      System.arraycopy(key[i], 0, fullKey, copyIndex, key[i].length);
-      copyIndex += key[i].length;
-    }
-    return fullKey;
-  }
-
-  @Override public byte[] getKeyByteArray(byte[][] key, int[] columnIndexes) {
-    return null;
-  }
-
-  @Override public long[] getKeyArray(byte[][] key, int[] columnIndexes) {
-    return null;
-  }
-
-  public int[] getBlockKeySize() {
-    return blockKeySize;
-  }
-
-  @Override public int getKeySizeByBlock(int[] blockIndexes) {
-    Set<Integer> selectedRanges = new HashSet<>();
-    for (int i = 0; i < blockIndexes.length; i++) {
-      int[] byteRange = byteRangesForKeys[blockIndexes[i]];
-      for (int j = byteRange[0]; j <= byteRange[1]; j++) {
-        selectedRanges.add(j);
-      }
-    }
-    return selectedRanges.size();
-  }
-
-  @Override public boolean equals(Object obj) {
-    if(!(obj instanceof MultiDimKeyVarLengthVariableSplitGenerator)) {
-      return false;
-    }
-    MultiDimKeyVarLengthVariableSplitGenerator o = (MultiDimKeyVarLengthVariableSplitGenerator)obj;
-    return Arrays.equals(o.dimensionsToSplit, dimensionsToSplit) && super.equals(obj);
-  }
-
-  @Override public int hashCode() {
-    return super.hashCode() + Arrays.hashCode(dimensionsToSplit);
-  }
-}
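
Unlike the equi-split variant, the number of dimensions per block is given explicitly; a
hedged illustration (with dimSplit = {2, 1} the first block should carry the first two
dimensions and the second block the remaining one):

MultiDimKeyVarLengthVariableSplitGenerator generator =
    new MultiDimKeyVarLengthVariableSplitGenerator(
        new int[] { 8, 8, 16 }, new int[] { 2, 1 });
// expected: block 0 spans dims 0-1 (2 bytes), block 1 spans dim 2 (2 bytes)
System.out.println(java.util.Arrays.toString(generator.getBlockKeySize()));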

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/DirectDictionaryGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/DirectDictionaryGenerator.java b/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/DirectDictionaryGenerator.java
deleted file mode 100644
index 145c89a..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/DirectDictionaryGenerator.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.keygenerator.directdictionary;
-
-/**
- * The interface provides the methods to generate the dictionary key
- * and to get the actual value from the dictionary key for a direct dictionary column.
- */
-public interface DirectDictionaryGenerator {
-
-  /**
-   * The method generates and returns the dictionary / surrogate key for a direct dictionary column
-   *
-   * @param member The member string value
-   * @return returns dictionary/ surrogate value
-   */
-  int generateDirectSurrogateKey(String member);
-
-  /**
-   * The method returns the actual value of the requested dictionary / surrogate
-   *
-   * @param key
-   * @return dictionary actual member
-   */
-  Object getValueFromSurrogate(int key);
-
-  /**
-   * The method generates and returns the dictionary / surrogate key for a direct dictionary
-   * column. This method is called while executing filter queries to get direct surrogate
-   * members. Currently the query engine layer only supports the yyyy-MM-dd HH:mm:ss date
-   * format, no matter in which format the data has been stored; so, while retrieving the
-   * direct surrogate value for a filter member, it should first be converted to a date in
-   * the above format in order to retrieve the timestamp.
-   *
-   * @param memberStr The member string value
-   * @param format    The date format used to parse the member
-   * @return returns dictionary / surrogate value
-   */
-  int generateDirectSurrogateKey(String memberStr, String format);
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/DirectDictionaryKeyGeneratorFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/DirectDictionaryKeyGeneratorFactory.java b/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/DirectDictionaryKeyGeneratorFactory.java
deleted file mode 100644
index f3633bf..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/DirectDictionaryKeyGeneratorFactory.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.keygenerator.directdictionary;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampDirectDictionaryGenerator;
-
-/**
- * Factory for DirectDictionary Key generator
- */
-public final class DirectDictionaryKeyGeneratorFactory {
-  /**
-   * private constructor
-   */
-  private DirectDictionaryKeyGeneratorFactory() {
-
-  }
-
-  /**
-   * The method returns the DirectDictionaryGenerator based for direct dictionary
-   * column based on dataType
-   *
-   * @param dataType DataType
-   * @return the generator instance
-   */
-  public static DirectDictionaryGenerator getDirectDictionaryGenerator(DataType dataType) {
-    DirectDictionaryGenerator directDictionaryGenerator = null;
-    switch (dataType) {
-      case TIMESTAMP:
-        directDictionaryGenerator = TimeStampDirectDictionaryGenerator.instance;
-        break;
-      default:
-
-    }
-    return directDictionaryGenerator;
-  }
-}
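
A hedged usage sketch tying the factory to the timestamp generator it returns (the
member literal follows the yyyy-MM-dd HH:mm:ss format the query layer expects; parse
failures and null members map to the reserved key 1):

DirectDictionaryGenerator generator =
    DirectDictionaryKeyGeneratorFactory.getDirectDictionaryGenerator(DataType.TIMESTAMP);
int surrogate = generator.generateDirectSurrogateKey("2016-08-15 07:08:46");
Object actual = generator.getValueFromSurrogate(surrogate); // scaled time value, null for key 1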

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampDirectDictionaryGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampDirectDictionaryGenerator.java b/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampDirectDictionaryGenerator.java
deleted file mode 100644
index 19862a2..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampDirectDictionaryGenerator.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.keygenerator.directdictionary.timestamp;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
-import org.carbondata.core.util.CarbonProperties;
-
-import static org.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampGranularityConstants.TIME_GRAN_DAY;
-import static org.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampGranularityConstants.TIME_GRAN_HOUR;
-import static org.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampGranularityConstants.TIME_GRAN_MIN;
-import static org.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampGranularityConstants.TIME_GRAN_SEC;
-
-/**
- * The class provides the methods to generate the dictionary key and to get the actual value
- * from the dictionary key for a direct dictionary column of TIMESTAMP type.
- */
-public class TimeStampDirectDictionaryGenerator implements DirectDictionaryGenerator {
-
-  private TimeStampDirectDictionaryGenerator() {
-
-  }
-
-  public static TimeStampDirectDictionaryGenerator instance =
-      new TimeStampDirectDictionaryGenerator();
-
-  /**
-   * The value of 1 unit of the SECOND, MINUTE, HOUR, or DAY in millis.
-   */
-  public static final long granularityFactor;
-  /**
-   * The timestamp to be considered as the start date for calculating the dictionary key.
-   * Java counts the number of milliseconds from the start of "January 1, 1970"; this property
-   * customizes that start position, for example to "January 1, 2000".
-   */
-  public static final long cutOffTimeStamp;
-  /**
-   * Logger instance
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(TimeStampDirectDictionaryGenerator.class.getName());
-
-  /**
-   * initialization block for granularityFactor and cutOffTimeStamp
-   */
-  static {
-    String cutOffTimeStampString = CarbonProperties.getInstance()
-        .getProperty(TimeStampGranularityConstants.CARBON_CUTOFF_TIMESTAMP);
-    String timeGranularity = CarbonProperties.getInstance()
-        .getProperty(TimeStampGranularityConstants.CARBON_TIME_GRANULARITY, TIME_GRAN_SEC);
-    long granularityFactorLocal = 1000;
-    switch (timeGranularity) {
-      case TIME_GRAN_SEC:
-        granularityFactorLocal = TimeStampGranularityTypeValue.MILLIS_SECONDS.getValue();
-        break;
-      case TIME_GRAN_MIN:
-        granularityFactorLocal = TimeStampGranularityTypeValue.MILLIS_MINUTE.getValue();
-        break;
-      case TIME_GRAN_HOUR:
-        granularityFactorLocal = TimeStampGranularityTypeValue.MILLIS_HOUR.getValue();
-        break;
-      case TIME_GRAN_DAY:
-        granularityFactorLocal = TimeStampGranularityTypeValue.MILLIS_DAY.getValue();
-        break;
-      default:
-        granularityFactorLocal = 1000;
-    }
-    long cutOffTimeStampLocal;
-    if (null == cutOffTimeStampString) {
-      cutOffTimeStampLocal = -1;
-    } else {
-      try {
-        SimpleDateFormat timeParser = new SimpleDateFormat(CarbonProperties.getInstance()
-            .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-                CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
-        timeParser.setLenient(false);
-        Date dateToStr = timeParser.parse(cutOffTimeStampString);
-        cutOffTimeStampLocal = dateToStr.getTime();
-      } catch (ParseException e) {
-        LOGGER.warn("Cannot convert" + cutOffTimeStampString
-            + " to Time/Long type value. Value considered for cutOffTimeStamp is -1." + e
-            .getMessage());
-        cutOffTimeStampLocal = -1;
-      }
-    }
-    granularityFactor = granularityFactorLocal;
-    cutOffTimeStamp = cutOffTimeStampLocal;
-  }
-
-  /**
-   * The method takes the member String as input, converts it,
-   * and returns the dictionary key
-   *
-   * @param memberStr date format string
-   * @return dictionary value
-   */
-  @Override public int generateDirectSurrogateKey(String memberStr) {
-    SimpleDateFormat timeParser = new SimpleDateFormat(CarbonProperties.getInstance()
-        .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-            CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
-    timeParser.setLenient(false);
-    if (null == memberStr || memberStr.trim().isEmpty() || memberStr
-        .equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
-      return 1;
-    }
-    return getDirectSurrogateForMember(memberStr, timeParser);
-  }
-
-  /**
-   * The method takes the member String as input, converts it,
-   * and returns the dictionary key
-   *
-   * @param memberStr date format string
-   * @return dictionary value
-   */
-  public int generateDirectSurrogateKey(String memberStr, String format) {
-    if (null == format) {
-      return generateDirectSurrogateKeyForNonTimestampType(memberStr);
-    } else {
-      SimpleDateFormat timeParser = new SimpleDateFormat(format);
-      timeParser.setLenient(false);
-      if (null == memberStr || memberStr.trim().isEmpty() || memberStr
-          .equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
-        return 1;
-      }
-      return getDirectSurrogateForMember(memberStr, timeParser);
-    }
-  }
-
-  private int getDirectSurrogateForMember(String memberStr, SimpleDateFormat timeParser) {
-    Date dateToStr = null;
-    try {
-      dateToStr = timeParser.parse(memberStr);
-    } catch (ParseException e) {
-      LOGGER.debug(
-          "Cannot convert " + memberStr + " to Time/Long type value. Value considered as null." + e
-              .getMessage());
-      dateToStr = null;
-    }
-    // adding +2 to reserve the first cutOffDiff value for null or empty dates
-    if (null == dateToStr) {
-      return 1;
-    } else {
-      return generateKey(dateToStr.getTime());
-    }
-  }
-
-  /**
-   * The method takes the dictionary key as input and returns the actual member value
-   *
-   * @param key
-   * @return member value/actual value Date
-   */
-  @Override public Object getValueFromSurrogate(int key) {
-    if (key == 1) {
-      return null;
-    }
-    long timeStamp = 0;
-    if (cutOffTimeStamp >= 0) {
-      timeStamp = ((key - 2) * granularityFactor + cutOffTimeStamp);
-    } else {
-      timeStamp = (key - 2) * granularityFactor;
-    }
-    return timeStamp * 1000L;
-  }
-
-  private int generateDirectSurrogateKeyForNonTimestampType(String memberStr) {
-    long timeValue = -1;
-    try {
-      timeValue = Long.valueOf(memberStr) / 1000;
-    } catch (NumberFormatException e) {
-      LOGGER.debug(
-          "Cannot convert " + memberStr + " Long type value. Value considered as null." + e
-              .getMessage());
-    }
-    if (timeValue == -1) {
-      return 1;
-    } else {
-      return generateKey(timeValue);
-    }
-  }
-
-  private int generateKey(long timeValue) {
-    if (cutOffTimeStamp >= 0) {
-      int keyValue = (int) ((timeValue - cutOffTimeStamp) / granularityFactor);
-      return keyValue < 0 ? 1 : keyValue + 2;
-    } else {
-      int keyValue = (int) (timeValue / granularityFactor);
-      return keyValue < 0 ? 1 : keyValue + 2;
-    }
-  }
-
-}
\ No newline at end of file
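
The key arithmetic above is a simple affine mapping; a worked sketch of the round trip
with illustrative values, assuming no cut-off is configured and the default second-level
granularity of 1000 ms:

long timeMillis = 1471244926000L;          // some timestamp in milliseconds
int key = (int) (timeMillis / 1000L) + 2;  // generateKey: scale, then +2 to reserve 0 and 1
long restored = (key - 2) * 1000L;         // getValueFromSurrogate, before its final *1000L scale
// restored == 1471244926000L; key 1 stays reserved for null or unparseable members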

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityConstants.java b/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityConstants.java
deleted file mode 100644
index decceae..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityConstants.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.keygenerator.directdictionary.timestamp;
-
-/**
- * Constants related to timestamp conversion
- */
-public interface TimeStampGranularityConstants {
-
-  /**
-   * The property to set the date to be considered as the start date for calculating the
-   * timestamp. Java counts the number of milliseconds from the start of "January 1, 1970";
-   * this property customizes that start position, for example to "January 1, 2000".
-   */
-  public static final String CARBON_CUTOFF_TIMESTAMP = "carbon.cutOffTimestamp";
-  /**
-   * The property to set the timestamp (i.e. millis) conversion to the SECOND, MINUTE, HOUR
-   * or DAY level
-   */
-  public static final String CARBON_TIME_GRANULARITY = "carbon.timegranularity";
-
-  /**
-   * Second level key
-   */
-  String TIME_GRAN_SEC = "SECOND";
-  /**
-   * minute level key
-   */
-  String TIME_GRAN_MIN = "MINUTE";
-  /**
-   * hour level key
-   */
-  String TIME_GRAN_HOUR = "HOUR";
-  /**
-   * day level key
-   */
-  String TIME_GRAN_DAY = "DAY";
-}
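
Both keys are read from CarbonProperties when the timestamp generator class initializes;
a hedged carbon.properties sketch (values illustrative; the cut-off must parse with the
configured carbon timestamp format):

carbon.cutOffTimestamp=2000-01-01 00:00:00
carbon.timegranularity=DAY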

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityTypeValue.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityTypeValue.java b/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityTypeValue.java
deleted file mode 100644
index 4c227f9..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityTypeValue.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.keygenerator.directdictionary.timestamp;
-
-/**
- * Enum constants holding the millisecond value for one second, minute, hour and day
- */
-public enum TimeStampGranularityTypeValue {
-  /**
-   * 1 second value in ms
-   */
-  MILLIS_SECONDS(1000),
-  /**
-   * 1 minute value in ms
-   */
-  MILLIS_MINUTE(1000 * 60),
-  /**
-   * 1 hour value in ms
-   */
-  MILLIS_HOUR(1000 * 60 * 60),
-  /**
-   * 1 day value in ms
-   */
-  MILLIS_DAY(1000 * 60 * 60 * 24);
-
-  /**
-   * enum constant value
-   */
-  private final long value;
-
-  /**
-   * constructor of enum constant
-   *
-   * @param value
-   */
-  private TimeStampGranularityTypeValue(long value) {
-    this.value = value;
-  }
-
-  /**
-   * @return return the value of enum constant
-   */
-  public long getValue() {
-    return this.value;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/factory/KeyGeneratorFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/factory/KeyGeneratorFactory.java b/core/src/main/java/org/carbondata/core/keygenerator/factory/KeyGeneratorFactory.java
deleted file mode 100644
index 0d90256..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/factory/KeyGeneratorFactory.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.keygenerator.factory;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.keygenerator.KeyGenerator;
-import org.carbondata.core.keygenerator.mdkey.MultiDimKeyVarLengthGenerator;
-import org.carbondata.core.util.CarbonUtil;
-
-public final class KeyGeneratorFactory {
-  private KeyGeneratorFactory() {
-
-  }
-
-  public static KeyGenerator getKeyGenerator(int[] dimension) {
-    int[] incrementedCardinality;
-    boolean isFullyFilled =
-        Boolean.parseBoolean(CarbonCommonConstants.IS_FULLY_FILLED_BITS_DEFAULT_VALUE);
-    if (!isFullyFilled) {
-      incrementedCardinality = CarbonUtil.getIncrementedCardinality(dimension);
-    } else {
-      incrementedCardinality = CarbonUtil.getIncrementedCardinalityFullyFilled(dimension);
-    }
-    return new MultiDimKeyVarLengthGenerator(incrementedCardinality);
-  }
-
-  /**
-   *
-   * @param dimCardinality : dimension cardinality
-   * @param columnSplits : number of columns in each block
-   * @return key generator
-   */
-  public static KeyGenerator getKeyGenerator(int[] dimCardinality, int[] columnSplits) {
-    int[] dimsBitLens = CarbonUtil.getDimensionBitLength(dimCardinality, columnSplits);
-
-    return new MultiDimKeyVarLengthGenerator(dimsBitLens);
-  }
-}
-
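A usage sketch of the factory (cardinalities are illustrative; generateKey declares KeyGenException):

    int[] cardinalities = new int[] { 100, 20, 3 };  // per-dimension cardinality
    KeyGenerator generator = KeyGeneratorFactory.getKeyGenerator(cardinalities);
    byte[] mdKey = generator.generateKey(new int[] { 42, 7, 1 }); // one surrogate per dimension
    long[] decoded = generator.getKeyArray(mdKey);   // back to { 42, 7, 1 }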

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/mdkey/AbstractKeyGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/mdkey/AbstractKeyGenerator.java b/core/src/main/java/org/carbondata/core/keygenerator/mdkey/AbstractKeyGenerator.java
deleted file mode 100644
index 3b4d6f8..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/mdkey/AbstractKeyGenerator.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.keygenerator.mdkey;
-
-import org.carbondata.core.keygenerator.KeyGenerator;
-
-public abstract class AbstractKeyGenerator implements KeyGenerator {
-
-  private static final long serialVersionUID = -6675293078575359769L;
-
-  @Override public int compare(byte[] byte1, byte[] byte2) {
-    // Short circuit equal case
-    if (byte1 == byte2) {
-      return 0;
-    }
-    // Bring WritableComparator code local
-    int i = 0;
-    int j = 0;
-    for (; i < byte1.length && j < byte2.length; i++, j++) {
-      int a = (byte1[i] & 0xff);
-      int b = (byte2[j] & 0xff);
-      if (a != b) {
-        return a - b;
-      }
-    }
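-    // note: equal-length inputs are assumed here; MDKeys from a single generator are
-    // fixed-length, so a shared prefix implies equal keys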
-    return 0;
-  }
-
-  public int compare(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2,
-      int length2) {
-    length1 += offset1;
-    length2 += offset2;
-    // Bring WritableComparator code local
-    for (; offset1 < length1 && offset2 < length2; offset1++, offset2++) {
-      int a = (buffer1[offset1] & 0xff);
-      int b = (buffer2[offset2] & 0xff);
-      if (a != b) {
-        return a - b;
-      }
-    }
-    return 0;
-  }
-
-  @Override public void setProperty(Object key, Object value) {
-    // no implementation required
-  }
-
-  @Override public int getKeySizeInBytes() {
-    return 0;
-  }
-
-  @Override public int[] getKeyByteOffsets(int index) {
-    return null;
-  }
-
-  @Override public int getDimCount() {
-    return 0;
-  }
-
-}
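The "& 0xff" masking above is what makes the byte comparison unsigned: in Java a raw byte is signed, so values from 0x80 upward would otherwise sort before 0x00. A standalone illustration:

    byte high = (byte) 0xFF;             // -1 as a signed byte
    byte low = (byte) 0x01;
    boolean signed = high < low;         // true: signed compare puts 0xFF first
    boolean unsigned = (high & 0xff) > (low & 0xff); // true: 255 > 1, the order keys need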

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/mdkey/Bits.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/mdkey/Bits.java b/core/src/main/java/org/carbondata/core/keygenerator/mdkey/Bits.java
deleted file mode 100644
index 2ce64ba..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/mdkey/Bits.java
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.keygenerator.mdkey;
-
-import java.io.Serializable;
-import java.util.Arrays;
-
-public class Bits implements Serializable {
-
-  /**
-   * Bits MAX_LENGTH
-   */
-  private static final int MAX_LENGTH = 63;
-  private static final int LONG_LENGTH = 64;
-  /**
-   * serialVersionUID.
-   */
-  private static final long serialVersionUID = 1555114921503304849L;
-  /**
-   * LONG_MAX.
-   */
-  private static final long LONG_MAX = 0x7fffffffffffffffL;
-  /**
-   * length.
-   */
-  private int length = 100;
-  /**
-   * lens.
-   */
-  private int[] lens;
-  /**
-   * wsize.
-   */
-  private int wsize;
-  /**
-   * byteSize.
-   */
-  private int byteSize;
-
-  public Bits(int[] lens) {
-    this.lens = lens;
-    this.length = getTotalLength(lens);
-
-    wsize = length / LONG_LENGTH;
-    byteSize = length / 8;
-
-    if (length % LONG_LENGTH != 0) {
-      wsize++;
-    }
-
-    if (length % 8 != 0) {
-      byteSize++;
-    }
-  }
-
-  public int getByteSize() {
-    return byteSize;
-  }
-
-  private int getTotalLength(int[] lens) {
-    int tLen = 0;
-    for (int len : lens) {
-      tLen += len;
-    }
-    return tLen;
-  }
-
-  public int getDimCount() {
-    return lens.length;
-  }
-
-  /**
-   * Returns the start and end byte offsets of the given dimension in the
-   * MDKey, as int[] {start, end}.
-   */
-  public int[] getKeyByteOffsets(int index) {
-    int prefixPaddingBits = length % 8 == 0 ? 0 : (8 - length % 8);
-
-    int priorLen = prefixPaddingBits;
-    int start = 0;
-    int end = 0;
-
-    // Calculate prior length for all previous keys
-    for (int i = 0; i < index; i++) {
-      priorLen += lens[i];
-    }
-
-    // Start
-    start = priorLen / 8;
-
-    int tillKeyLength = priorLen + lens[index];
-
-    // End key
-    end = (tillKeyLength) / 8;
-
-    // Consider if end is the last bit. No need to include the next byte.
-    if (tillKeyLength % 8 == 0) {
-      end--;
-    }
-
-    return new int[] { start, end };
-  }
-
-  protected long[] get(long[] keys) {
-    long[] words = new long[wsize];
-    int ll = 0;
-    for (int i = lens.length - 1; i >= 0; i--) {
-
-      long val = keys[i];
-
-      int idx = ll >> 6;        // divide by 64 to get the word index
-      int position = ll & 0x3f; // bit position within the word (ll mod 64)
-      // mask the value so that it cannot exceed the bits allotted to this dimension
-      val = val & (LONG_MAX >> (MAX_LENGTH - lens[i]));
-      long mask = (val << position);
-      long word = words[idx];
-      words[idx] = (word | mask);
-      ll += lens[i];
-
-      int nextIndex = ll >> 6;// This is divide by 64
-
-      if (nextIndex != idx) {
-        int consideredBits = (lens[i] - ll) & 0x3f;
-        // check for spill over only if not all the bits were consumed in the current word
-        if (consideredBits < lens[i]) {
-          mask = (val >> ((lens[i] - ll) & 0x3f));
-          word = words[nextIndex];
-          words[nextIndex] = (word | mask);
-        }
-      }
-
-    }
-
-    return words;
-  }
-
-  protected long[] get(int[] keys) {
-    long[] words = new long[wsize];
-    int ll = 0;
-    for (int i = lens.length - 1; i >= 0; i--) {
-
-      long val = keys[i];
-
-      int index = ll >> 6;  // divide by 64 to get the word index
-      int pos = ll & 0x3f;  // bit position within the word (ll mod 64)
-      // mask the value so that it cannot exceed the bits allotted to this dimension
-      val = val & (LONG_MAX >> (MAX_LENGTH - lens[i]));
-      long mask = (val << pos);
-      long word = words[index];
-      words[index] = (word | mask);
-      ll += lens[i];
-
-      int nextIndex = ll >> 6;// This is divide by 64
-
-      if (nextIndex != index) {
-        int consideredBits = (lens[i] - ll) & 0x3f;
-        // check for spill over only if not all the bits were consumed in the current word
-        if (consideredBits < lens[i]) {
-          mask = (val >> ((lens[i] - ll) & 0x3f));
-          word = words[nextIndex];
-          words[nextIndex] = (word | mask);
-        }
-      }
-
-    }
-
-    return words;
-  }
-
-  private long[] getArray(long[] words) {
-    long[] vals = new long[lens.length];
-    int ll = 0;
-    for (int i = lens.length - 1; i >= 0; i--) {
-
-      int index = ll >> 6;
-      int pos = ll & 0x3f;
-      long val = words[index];
-      long mask = (LONG_MAX >>> (MAX_LENGTH - lens[i]));
-      mask = mask << pos;
-      vals[i] = (val & mask);
-      vals[i] >>>= pos;
-      ll += lens[i];
-
-      int nextIndex = ll >> 6;
-      if (nextIndex != index) {
-        pos = ll & 0x3f;
-        // pos == 0 means no bits of this key spilled over into the next word
-        if (pos != 0) {
-          mask = (LONG_MAX >>> (MAX_LENGTH - pos));
-          val = words[nextIndex];
-          vals[i] = vals[i] | ((val & mask) << (lens[i] - pos));
-        }
-      }
-    }
-    return vals;
-  }
-
-  public byte[] getBytes(long[] keys) {
-
-    long[] words = get(keys);
-
-    return getBytesVal(words);
-  }
-
-  private byte[] getBytesVal(long[] words) {
-    int length = 8;
-    byte[] bytes = new byte[byteSize];
-
-    int l = byteSize - 1;
-    for (int i = 0; i < words.length; i++) {
-      long val = words[i];
-
-      for (int j = length - 1; j > 0 && l > 0; j--) {
-        bytes[l] = (byte) val;
-        val >>>= 8;
-        l--;
-      }
-      bytes[l] = (byte) val;
-      l--;
-    }
-    return bytes;
-  }
-
-  public byte[] getBytes(int[] keys) {
-
-    long[] words = get(keys);
-
-    return getBytesVal(words);
-  }
-
-  public long[] getKeyArray(byte[] key, int offset) {
-
-    int length = 8;
-    int ls = byteSize;
-    long[] words = new long[wsize];
-    for (int i = 0; i < words.length; i++) {
-      long l = 0;
-      ls -= 8;
-      int m = ls + length;  // length == 8; both branches computed the same value
-      if (ls < 0) {
-        ls = 0;
-      }
-      for (int j = ls; j < m; j++) {
-        l <<= 8;
-        l ^= key[j + offset] & 0xFF;
-      }
-      words[i] = l;
-    }
-
-    return getArray(words);
-
-  }
-
-  public long[] getKeyArray(byte[] key, int[] maskByteRanges) {
-
-    int length = 8;
-    int ls = byteSize;
-    long[] words = new long[wsize];
-    for (int i = 0; i < words.length; i++) {
-      long l = 0;
-      ls -= 8;
-      int m2 = ls + length;  // length == 8; both branches computed the same value
-      if (ls < 0) {
-        ls = 0;
-      }
-      if (maskByteRanges == null) {
-        for (int j = ls; j < m2; j++) {
-          l <<= 8;
-          l ^= key[j] & 0xFF;
-        }
-      } else {
-        for (int j = ls; j < m2; j++) {
-          l <<= 8;
-          if (maskByteRanges[j] != -1) {
-            l ^= key[maskByteRanges[j]] & 0xFF;
-          }
-        }
-      }
-      words[i] = l;
-    }
-
-    return getArray(words);
-
-  }
-
-  @Override public boolean equals(Object obj) {
-    if (obj instanceof Bits) {
-      Bits other = (Bits) obj;
-      return Arrays.equals(lens, other.lens);
-    }
-    return false;
-  }
-
-  @Override public int hashCode() {
-    return Arrays.hashCode(lens);
-  }
-}
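To make the packing concrete: with bit lengths {3, 5, 4} the MDKey needs 12 bits, so it occupies two bytes with four leading pad bits. A round-trip sketch (values stay within each dimension's range):

    Bits bits = new Bits(new int[] { 3, 5, 4 });
    int size = bits.getByteSize();                  // 2
    byte[] packed = bits.getBytes(new long[] { 5, 19, 9 });
    long[] unpacked = bits.getKeyArray(packed, 0);  // { 5, 19, 9 }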

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/mdkey/MultiDimKeyVarLengthGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/mdkey/MultiDimKeyVarLengthGenerator.java b/core/src/main/java/org/carbondata/core/keygenerator/mdkey/MultiDimKeyVarLengthGenerator.java
deleted file mode 100644
index 1aa2b81..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/mdkey/MultiDimKeyVarLengthGenerator.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.keygenerator.mdkey;
-
-import org.carbondata.core.keygenerator.KeyGenException;
-
-public class MultiDimKeyVarLengthGenerator extends AbstractKeyGenerator {
-
-  private static final long serialVersionUID = 9134778127271586515L;
-  /**
-   * byte range {startOffset, endOffset} of each dimension within the generated key
-   */
-  protected int[][] byteRangesForKeys;
-  private Bits bits;
-  private int startAndEndKeySizeWithPrimitives;
-
-  public MultiDimKeyVarLengthGenerator(int[] lens) {
-    bits = new Bits(lens);
-    byteRangesForKeys = new int[lens.length][];
-    int keys = lens.length;
-    for (int i = 0; i < keys; i++) {
-      byteRangesForKeys[i] = bits.getKeyByteOffsets(i);
-    }
-  }
-
-  @Override public byte[] generateKey(long[] keys) throws KeyGenException {
-
-    return bits.getBytes(keys);
-  }
-
-  @Override public byte[] generateKey(int[] keys) throws KeyGenException {
-
-    return bits.getBytes(keys);
-  }
-
-  @Override public long[] getKeyArray(byte[] key) {
-
-    return bits.getKeyArray(key, 0);
-  }
-
-  @Override public long[] getKeyArray(byte[] key, int offset) {
-
-    return bits.getKeyArray(key, offset);
-  }
-
-  @Override public long getKey(byte[] key, int index) {
-
-    return bits.getKeyArray(key, 0)[index];
-  }
-
-  public int getKeySizeInBytes() {
-    return bits.getByteSize();
-  }
-
-  @Override public long[] getSubKeyArray(byte[] key, int index, int size) {
-    if (index < 0 || size == 0) {
-      return null;
-    }
-    long[] keys = bits.getKeyArray(key, 0);
-    long[] rtn = new long[size];
-    System.arraycopy(keys, index, rtn, 0, size);
-    return rtn;
-  }
-
-  @Override public int[] getKeyByteOffsets(int index) {
-    return byteRangesForKeys[index];
-  }
-
-  @Override public int getDimCount() {
-
-    return bits.getDimCount();
-  }
-
-  @Override public boolean equals(Object obj) {
-    if (obj instanceof MultiDimKeyVarLengthGenerator) {
-      MultiDimKeyVarLengthGenerator other = (MultiDimKeyVarLengthGenerator) obj;
-      return bits.equals(other.bits);
-    }
-
-    return false;
-  }
-
-  @Override public int hashCode() {
-    return bits.hashCode();
-  }
-
-  @Override public long[] getKeyArray(byte[] key, int[] maskedByteRanges) {
-    return bits.getKeyArray(key, maskedByteRanges);
-  }
-
-  @Override public int getStartAndEndKeySizeWithOnlyPrimitives() {
-    return startAndEndKeySizeWithPrimitives;
-  }
-
-  @Override
-  public void setStartAndEndKeySizeWithOnlyPrimitives(int startAndEndKeySizeWithPrimitives) {
-    this.startAndEndKeySizeWithPrimitives = startAndEndKeySizeWithPrimitives;
-  }
-
-}
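A brief round trip through the generator, including the per-dimension byte ranges precomputed in the constructor (bit lengths { 8, 8 } are illustrative; generateKey declares KeyGenException):

    MultiDimKeyVarLengthGenerator gen = new MultiDimKeyVarLengthGenerator(new int[] { 8, 8 });
    byte[] key = gen.generateKey(new long[] { 200, 17 }); // 2-byte key, one byte per dimension
    int[] range = gen.getKeyByteOffsets(1);               // { 1, 1 }: dimension 1 occupies byte 1
    long value = gen.getKey(key, 1);                      // 17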

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/mdkey/NumberCompressor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/mdkey/NumberCompressor.java b/core/src/main/java/org/carbondata/core/keygenerator/mdkey/NumberCompressor.java
deleted file mode 100644
index 7b1cd88..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/mdkey/NumberCompressor.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.keygenerator.mdkey;
-
-/**
- * It compresses the data as per max cardinality. It takes only the required bits for each key.
- */
-public class NumberCompressor {
-
-  /**
-   * Bits MAX_LENGTH
-   */
-  private static final int MAX_LENGTH = 63;
-
-  private static final int LONG_LENGTH = 64;
-
-  private static final int BYTE_LENGTH = 8;
-
-  /**
-   * LONG_MAX.
-   */
-  private static final long LONG_MAX = 0x7fffffffffffffffL;
-
-  private byte bitsLength;
-
-  public NumberCompressor(int cardinality) {
-    bitsLength = (byte) Long.toBinaryString(cardinality).length();
-  }
-
-  public byte[] compress(int[] keys) {
-    int[] sizes = getWordsAndByteSize(keys.length);
-    long[] words = get(keys, sizes[0]);
-
-    return getByteValues(sizes, words);
-  }
-
-  private byte[] getByteValues(int[] sizes, long[] words) {
-    byte[] bytes = new byte[sizes[1]];
-
-    int l = sizes[1] - 1;
-    for (int i = 0; i < words.length; i++) {
-      long val = words[i];
-
-      for (int j = BYTE_LENGTH - 1; j > 0 && l > 0; j--) {
-        bytes[l] = (byte) val;
-        val >>>= 8;
-        l--;
-      }
-      bytes[l] = (byte) val;
-      l--;
-    }
-    return bytes;
-  }
-
-  protected long[] get(int[] keys, int wsize) {
-    long[] words = new long[wsize];
-    int ll = 0;
-    int index = 0;
-    int pos = 0;
-    int nextIndex = 0;
-    for (int i = keys.length - 1; i >= 0; i--) {
-
-      long val = keys[i];
-
-      index = ll >> 6;  // divide by 64 to get the word index
-      pos = ll & 0x3f;  // bit position within the word (ll mod 64)
-      long mask = (val << pos);
-      long word = words[index];
-      words[index] = (word | mask);
-      ll += bitsLength;
-
-      nextIndex = ll >> 6;// This is divide by 64
-
-      if (nextIndex != index) {
-        int consideredBits = (bitsLength - ll) & 0x3f;
-        // check for spill over only if not all the bits were consumed in the current word
-        if (consideredBits < bitsLength) {
-          // Check for spill over
-          mask = (val >> (bitsLength - ll & 0x3f));
-          words[nextIndex] |= mask;
-        }
-      }
-
-    }
-    return words;
-  }
-
-  protected long[] get(byte[] keys, int wsize) {
-    long[] words = new long[wsize];
-    int ll = 0;
-    long val = 0L;
-    for (int i = keys.length - 1; i >= 0; ) {
-
-      int size = i;
-      val = 0L;
-      for (int j = i + 1; j <= size; ) {
-        val <<= BYTE_LENGTH;
-        val ^= keys[j++] & 0xFF;
-        i--;
-      }
-      int index = ll >> 6;// divide by 64 to get the new word index
-      words[index] |= (val << (ll & 0x3f));
-      ll += bitsLength;
-
-      int nextIndex = ll >> 6;// This is divide by 64
-
-      if (nextIndex != index) {
-        int consideredBits = (bitsLength - ll) & 0x3f;
-        // check for spill over only if not all the bits were consumed in the current word
-        if (consideredBits < bitsLength) {
-          // Check for spill over
-          words[nextIndex] |= (val >> (bitsLength - ll & 0x3f));
-        }
-      }
-
-    }
-    return words;
-  }
-
-  public int[] unCompress(byte[] key) {
-    int ls = key.length;
-    int arrayLength = (ls * BYTE_LENGTH) / bitsLength;
-    long[] words = new long[getWordsSizeFromBytesSize(ls)];
-    unCompressVal(key, ls, words);
-    return getArray(words, arrayLength);
-  }
-
-  private void unCompressVal(byte[] key, int ls, long[] words) {
-    for (int i = 0; i < words.length; i++) {
-      long l = 0;
-      ls -= BYTE_LENGTH;
-      int m = ls + BYTE_LENGTH;  // both branches computed the same value
-      if (ls < 0) {
-        ls = 0;
-      }
-      for (int j = ls; j < m; j++) {
-        l <<= BYTE_LENGTH;
-        l ^= key[j] & 0xFF;
-      }
-      words[i] = l;
-    }
-  }
-
-  private int[] getArray(long[] words, int arrayLength) {
-    int[] vals = new int[arrayLength];
-    int ll = 0;
-    long globalMask = LONG_MAX >>> (MAX_LENGTH - bitsLength);
-    for (int i = arrayLength - 1; i >= 0; i--) {
-
-      int index = ll >> 6;
-      int pos = ll & 0x3f;
-      long val = words[index];
-      long mask = globalMask << pos;
-      long value = (val & mask) >>> pos;
-      ll += bitsLength;
-
-      int nextIndex = ll >> 6;
-      if (nextIndex != index) {
-        pos = ll & 0x3f;
-        // pos == 0 means no bits of this key spilled over into the next word
-        if (pos != 0) {
-          mask = (LONG_MAX >>> (MAX_LENGTH - pos));
-          val = words[nextIndex];
-          value = value | ((val & mask) << (bitsLength - pos));
-        }
-      }
-      vals[i] = (int) value;
-    }
-    return vals;
-  }
-
-  private int[] getWordsAndByteSize(int arrayLength) {
-    int length = arrayLength * bitsLength;
-    int wsize = length / LONG_LENGTH;
-    int byteSize = length / BYTE_LENGTH;
-
-    if (length % LONG_LENGTH != 0) {
-      wsize++;
-    }
-
-    if (length % BYTE_LENGTH != 0) {
-      byteSize++;
-    }
-    return new int[] { wsize, byteSize };
-  }
-
-  private int getWordsSizeFromBytesSize(int byteSize) {
-    int wsize = byteSize / BYTE_LENGTH;
-    if (byteSize % BYTE_LENGTH != 0) {
-      wsize++;
-    }
-    return wsize;
-  }
-
-}
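For example, with cardinality 7 each key needs 3 bits (Long.toBinaryString(7) is "111"), so eight keys fit in three bytes instead of 32:

    NumberCompressor compressor = new NumberCompressor(7);    // 3 bits per key
    byte[] packed = compressor.compress(new int[] { 1, 2, 3, 4, 5, 6, 7, 0 });
    int len = packed.length;                                  // 3
    int[] restored = compressor.unCompress(packed);           // { 1, 2, 3, 4, 5, 6, 7, 0 }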

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/load/BlockDetails.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/load/BlockDetails.java b/core/src/main/java/org/carbondata/core/load/BlockDetails.java
deleted file mode 100644
index 39c39a0..0000000
--- a/core/src/main/java/org/carbondata/core/load/BlockDetails.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.load;
-
-import java.io.Serializable;
-
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-
-/**
- * block details: offset, length, owning file path and host locations
- */
-public class BlockDetails implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = 2293906691860002339L;
-  //block offset
-  private long blockOffset;
-  //block length
-  private long blockLength;
-  //file path which block belong to
-  private String filePath;
-  // locations where this block exists
-  private String[] locations;
-
-  public BlockDetails(String filePath, long blockOffset, long blockLength, String[] locations) {
-    this.filePath = filePath;
-    this.blockOffset = blockOffset;
-    this.blockLength = blockLength;
-    this.locations = locations;
-  }
-
-  public long getBlockOffset() {
-    return blockOffset;
-  }
-
-  public void setBlockOffset(long blockOffset) {
-    this.blockOffset = blockOffset;
-  }
-
-  public long getBlockLength() {
-    return blockLength;
-  }
-
-  public void setBlockLength(long blockLength) {
-    this.blockLength = blockLength;
-  }
-
-  public String getFilePath() {
-    return FileFactory.getUpdatedFilePath(filePath);
-  }
-
-  public void setFilePath(String filePath) {
-    this.filePath = filePath;
-  }
-
-  public String[] getLocations() {
-    return locations;
-  }
-}
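A BlockDetails instance typically mirrors one input split; a construction sketch for a 128 MB block at the start of a file (path and host names are illustrative):

    BlockDetails block = new BlockDetails(
        "hdfs://namenode:8020/data/sales.csv",          // file the block belongs to
        0L,                                             // offset of the block within the file
        128L * 1024 * 1024,                             // block length in bytes
        new String[] { "datanode1", "datanode2" });     // hosts holding replicas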

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/load/LoadMetadataDetails.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/load/LoadMetadataDetails.java b/core/src/main/java/org/carbondata/core/load/LoadMetadataDetails.java
deleted file mode 100644
index f0b5ac9..0000000
--- a/core/src/main/java/org/carbondata/core/load/LoadMetadataDetails.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.load;
-
-import java.io.Serializable;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-public class LoadMetadataDetails implements Serializable {
-
-  private static final long serialVersionUID = 1106104914918491724L;
-  private String timestamp;
-  private String loadStatus;
-  private String loadName;
-  private String partitionCount;
-
-  /**
-   * LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(LoadMetadataDetails.class.getName());
-
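-  // note: SimpleDateFormat is not thread-safe; this shared static parser assumes
-  // that timestamp parsing is not done concurrently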
-  private static final SimpleDateFormat parser =
-      new SimpleDateFormat(CarbonCommonConstants.CARBON_TIMESTAMP);
-  /**
-   * Segment modification or deletion time stamp
-   */
-  private String modificationOrdeletionTimesStamp;
-  private String loadStartTime;
-
-  private String mergedLoadName;
-  /**
-   * visibility is used to determine whether the load is visible or not.
-   */
-  private String visibility = "true";
-
-  /**
-   * To know if the segment is a major compacted segment or not.
-   */
-  private String majorCompacted;
-
-  public String getPartitionCount() {
-    return partitionCount;
-  }
-
-  public void setPartitionCount(String partitionCount) {
-    this.partitionCount = partitionCount;
-  }
-
-  public String getTimestamp() {
-    return timestamp;
-  }
-
-  public void setTimestamp(String timestamp) {
-    this.timestamp = timestamp;
-  }
-
-  public String getLoadStatus() {
-    return loadStatus;
-  }
-
-  public void setLoadStatus(String loadStatus) {
-    this.loadStatus = loadStatus;
-  }
-
-  public String getLoadName() {
-    return loadName;
-  }
-
-  public void setLoadName(String loadName) {
-    this.loadName = loadName;
-  }
-
-  /**
-   * @return the modificationOrdeletionTimesStamp
-   */
-  public String getModificationOrdeletionTimesStamp() {
-    return modificationOrdeletionTimesStamp;
-  }
-
-  /**
-   * @param modificationOrdeletionTimesStamp the modificationOrdeletionTimesStamp to set
-   */
-  public void setModificationOrdeletionTimesStamp(String modificationOrdeletionTimesStamp) {
-    this.modificationOrdeletionTimesStamp = modificationOrdeletionTimesStamp;
-  }
-
-  /* (non-Javadoc)
-   * @see java.lang.Object#hashCode()
-   */
-  @Override public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + ((loadName == null) ? 0 : loadName.hashCode());
-    return result;
-  }
-
-  /* (non-Javadoc)
-   * @see java.lang.Object#equals(java.lang.Object)
-   */
-  @Override public boolean equals(Object obj) {
-    if (obj == null) {
-      return false;
-
-    }
-    if (!(obj instanceof LoadMetadataDetails)) {
-      return false;
-    }
-    LoadMetadataDetails other = (LoadMetadataDetails) obj;
-    if (loadName == null) {
-      if (other.loadName != null) {
-        return false;
-      }
-    } else if (!loadName.equals(other.loadName)) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * @return the startLoadTime
-   */
-  public String getLoadStartTime() {
-    return loadStartTime;
-  }
-
-  /**
-   * @return the load start time as a long value
-   */
-  public long getLoadStartTimeAsLong() {
-    return getTimeStamp(loadStartTime);
-  }
-
-  /**
-   * converts the given load start time string to a long value
-   * @param loadStartTime
-   * @return the parsed time, or null if the value is empty or unparsable
-   */
-  private Long getTimeStamp(String loadStartTime) {
-    if (loadStartTime.isEmpty()) {
-      return null;
-    }
-
-    Date dateToStr = null;
-    try {
-      dateToStr = parser.parse(loadStartTime);
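-      // getTime() returns milliseconds; multiplying by 1000 stores the value in microseconds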
-      return dateToStr.getTime() * 1000;
-    } catch (ParseException e) {
-      LOGGER.error("Cannot convert" + loadStartTime + " to Time/Long type value" + e.getMessage());
-      return null;
-    }
-  }
-  /**
-   * @param loadStartTime
-   */
-  public void setLoadStartTime(String loadStartTime) {
-    this.loadStartTime = loadStartTime;
-  }
-
-  /**
-   * @return the mergedLoadName
-   */
-  public String getMergedLoadName() {
-    return mergedLoadName;
-  }
-
-  /**
-   * @param mergedLoadName the mergedLoadName to set
-   */
-  public void setMergedLoadName(String mergedLoadName) {
-    this.mergedLoadName = mergedLoadName;
-  }
-
-  /**
-   * @return the visibility
-   */
-  public String getVisibility() {
-    return visibility;
-  }
-
-  /**
-   * @param visibility the visibility to set
-   */
-  public void setVisibility(String visibility) {
-    this.visibility = visibility;
-  }
-
-  /**
-   * Return "true" (stored as a string) if it is a major compacted segment.
-   * @return
-   */
-  public String isMajorCompacted() {
-    return majorCompacted;
-  }
-
-  /**
-   * Set true if it is a major compacted segment.
-   * @param majorCompacted
-   */
-  public void setMajorCompacted(String majorCompacted) {
-    this.majorCompacted = majorCompacted;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/metadata/BlockletInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/metadata/BlockletInfo.java b/core/src/main/java/org/carbondata/core/metadata/BlockletInfo.java
deleted file mode 100644
index 2e04d53..0000000
--- a/core/src/main/java/org/carbondata/core/metadata/BlockletInfo.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.metadata;
-
-public class BlockletInfo {
-  /**
-   * fileName.
-   */
-  private String fileName;
-
-  /**
-   * keyOffset.
-   */
-  private long keyOffset;
-
-  /**
-   * measureOffset.
-   */
-  private long[] measureOffset;
-
-  /**
-   * measureLength.
-   */
-  private int[] measureLength;
-
-  /**
-   * keyLength.
-   */
-  private int keyLength;
-
-  /**
-   * numberOfKeys.
-   */
-  private int numberOfKeys;
-
-  /**
-   * startKey.
-   */
-  private byte[] startKey;
-
-  /**
-   * endKey.
-   */
-  private byte[] endKey;
-
-  /**
-   * getFileName().
-   *
-   * @return String.
-   */
-  public String getFileName() {
-    return fileName;
-  }
-
-  /**
-   * setFileName.
-   */
-  public void setFileName(String fileName) {
-    this.fileName = fileName;
-  }
-
-  /**
-   * getKeyOffset.
-   *
-   * @return long.
-   */
-  public long getKeyOffset() {
-    return keyOffset;
-  }
-
-  /**
-   * setKeyOffset.
-   *
-   * @param keyOffset
-   */
-  public void setKeyOffset(long keyOffset) {
-    this.keyOffset = keyOffset;
-  }
-
-  /**
-   * getMeasureLength
-   *
-   * @return int[].
-   */
-  public int[] getMeasureLength() {
-    return measureLength;
-  }
-
-  /**
-   * setMeasureLength.
-   *
-   * @param measureLength
-   */
-  public void setMeasureLength(int[] measureLength) {
-    this.measureLength = measureLength;
-  }
-
-  /**
-   * getKeyLength.
-   *
-   * @return
-   */
-  public int getKeyLength() {
-    return keyLength;
-  }
-
-  /**
-   * setKeyLength.
-   */
-  public void setKeyLength(int keyLength) {
-    this.keyLength = keyLength;
-  }
-
-  /**
-   * getMeasureOffset.
-   *
-   * @return long[].
-   */
-  public long[] getMeasureOffset() {
-    return measureOffset;
-  }
-
-  /**
-   * setMeasureOffset.
-   *
-   * @param measureOffset
-   */
-  public void setMeasureOffset(long[] measureOffset) {
-    this.measureOffset = measureOffset;
-  }
-
-  /**
-   * getNumberOfKeys()
-   *
-   * @return int.
-   */
-  public int getNumberOfKeys() {
-    return numberOfKeys;
-  }
-
-  /**
-   * setNumberOfKeys.
-   *
-   * @param numberOfKeys
-   */
-  public void setNumberOfKeys(int numberOfKeys) {
-    this.numberOfKeys = numberOfKeys;
-  }
-
-  /**
-   * getStartKey().
-   *
-   * @return byte[].
-   */
-  public byte[] getStartKey() {
-    return startKey;
-  }
-
-  /**
-   * setStartKey.
-   *
-   * @param startKey
-   */
-  public void setStartKey(byte[] startKey) {
-    this.startKey = startKey;
-  }
-
-  /**
-   * getEndKey().
-   *
-   * @return byte[].
-   */
-  public byte[] getEndKey() {
-    return endKey;
-  }
-
-  /**
-   * setEndKey.
-   *
-   * @param endKey
-   */
-  public void setEndKey(byte[] endKey) {
-    this.endKey = endKey;
-  }
-}


[26/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/NoDictionaryTypeVisitor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/NoDictionaryTypeVisitor.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/NoDictionaryTypeVisitor.java
new file mode 100644
index 0000000..e7db0dc
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/NoDictionaryTypeVisitor.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.resolver.resolverinfo.visitor;
+
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.DimColumnFilterInfo;
+import org.apache.carbondata.scan.filter.FilterUtil;
+import org.apache.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+
+public class NoDictionaryTypeVisitor implements ResolvedFilterInfoVisitorIntf {
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(NoDictionaryTypeVisitor.class.getName());
+
+  /**
+   * Visitor method which updates the filter-related details in visitableObj. For no-dictionary
+   * columns the filter members are resolved directly: the values are not part of any dictionary,
+   * so no lookup is needed and the actual data can be converted to byte[] and set as-is.
+   * This type of encoding is effective when the particular column has very high cardinality.
+   *
+   * @param visitableObj
+   * @param metadata
+   * @throws FilterUnsupportedException if an exception occurs while evaluating
+   * filter models.
+   */
+  public void populateFilterResolvedInfo(DimColumnResolvedFilterInfo visitableObj,
+      FilterResolverMetadata metadata) throws FilterUnsupportedException {
+    DimColumnFilterInfo resolvedFilterObject = null;
+    List<String> evaluateResultListFinal;
+    try {
+      evaluateResultListFinal = metadata.getExpression().evaluate(null).getListAsString();
+      // Add the default null member so that it is not displayed in the
+      // result, as per Hive compatibility.
+      if (!metadata.isIncludeFilter() && !evaluateResultListFinal
+          .contains(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
+        evaluateResultListFinal.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL);
+      }
+    } catch (FilterIllegalMemberException e) {
+      throw new FilterUnsupportedException(e);
+    }
+    resolvedFilterObject = FilterUtil
+        .getNoDictionaryValKeyMemberForFilter(metadata.getTableIdentifier(),
+            metadata.getColumnExpression(), evaluateResultListFinal, metadata.isIncludeFilter());
+    visitableObj.setFilterValues(resolvedFilterObject);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/ResolvedFilterInfoVisitorIntf.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/ResolvedFilterInfoVisitorIntf.java b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/ResolvedFilterInfoVisitorIntf.java
new file mode 100644
index 0000000..19ad3aa
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/resolver/resolverinfo/visitor/ResolvedFilterInfoVisitorIntf.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.resolver.resolverinfo.visitor;
+
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+
+public interface ResolvedFilterInfoVisitorIntf {
+
+  /**
+   * The visitor pattern is used here to populate the dimColResolvedFilterInfo
+   * visitable object with filter member values based on the visitor type.
+   * There are currently three types of visitors (custom, direct and no
+   * dictionary), and each populates the visitable instance according to its
+   * own business logic.
+   *
+   * @param visitableObj
+   * @param metadata
+   * @throws FilterUnsupportedException
+   */
+  void populateFilterResolvedInfo(DimColumnResolvedFilterInfo visitableObj,
+      FilterResolverMetadata metadata) throws FilterUnsupportedException;
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/model/CarbonQueryPlan.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/model/CarbonQueryPlan.java b/core/src/main/java/org/apache/carbondata/scan/model/CarbonQueryPlan.java
new file mode 100644
index 0000000..18945d6
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/model/CarbonQueryPlan.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ *
+ */
+package org.apache.carbondata.scan.model;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.scan.expression.Expression;
+
+/**
+ * This class contains all the logical information about the query, such as dimensions,
+ * measures, sort order, topN etc.
+ */
+public class CarbonQueryPlan implements Serializable {
+  /**
+   *
+   */
+  private static final long serialVersionUID = -9036044826928017164L;
+
+  /**
+   * Database name
+   */
+  private String databaseName;
+
+  /**
+   * Table name
+   */
+  private String tableName;
+
+  /**
+   * List of dimensions.
+   * Ex : select employee_name,department_name,sum(salary) from employee, then employee_name
+   * and department_name are dimensions
+   * If no dimensions are selected in the query, this list remains empty.
+   */
+  private List<QueryDimension> dimensions =
+      new ArrayList<QueryDimension>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+
+  /**
+   * List of measures.
+   * Ex : select employee_name,department_name,sum(salary) from employee, then sum(salary)
+   * would be the measure.
+   * If no measures are selected in the query, this list remains empty.
+   */
+  private List<QueryMeasure> measures =
+      new ArrayList<QueryMeasure>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+
+  /**
+   * Limit
+   */
+  private int limit = -1;
+
+  /**
+   * If it is a detail query, there is no need to aggregate in the backend
+   */
+  private boolean detailQuery;
+
+  /**
+   * expression
+   */
+  private Expression expression;
+
+  /**
+   * queryId
+   */
+  private String queryId;
+
+  /**
+   * outLocationPath
+   */
+  private String outLocationPath;
+
+  /**
+   * isCountStarQuery
+   */
+  private boolean isCountStartQuery;
+
+  private List<QueryDimension> sortedDimensions;
+
+  /**
+   * If it is a raw detail query, no need to aggregate in the backend, and it returns
+   * dictionary data without decoding.
+   */
+  private boolean rawDetailQuery;
+
+  /**
+   * Constructor created with table name.
+   *
+   * @param tableName
+   */
+  public CarbonQueryPlan(String tableName) {
+    this.tableName = tableName;
+  }
+
+  /**
+   * Constructor created with database name and table name.
+   *
+   * @param databaseName
+   * @param tableName
+   */
+  public CarbonQueryPlan(String databaseName, String tableName) {
+    this.tableName = tableName;
+    this.databaseName = databaseName;
+  }
+
+  /**
+   * @return the dimensions
+   */
+  public List<QueryDimension> getDimensions() {
+    return dimensions;
+  }
+
+  public void addDimension(QueryDimension dimension) {
+    this.dimensions.add(dimension);
+  }
+
+  /**
+   * @return the measures
+   */
+  public List<QueryMeasure> getMeasures() {
+    return measures;
+  }
+
+  public void addMeasure(QueryMeasure measure) {
+    this.measures.add(measure);
+  }
+
+  public Expression getFilterExpression() {
+    return expression;
+  }
+
+  public void setFilterExpression(Expression expression) {
+    this.expression = expression;
+  }
+
+  /**
+   * @return the databaseName
+   */
+  public String getDatabaseName() {
+    return databaseName;
+  }
+
+  /**
+   * @return the tableName
+   */
+  public String getTableName() {
+    return tableName;
+  }
+
+  /**
+   * @return the limit
+   */
+  public int getLimit() {
+    return limit;
+  }
+
+  /**
+   * @param limit the limit to set
+   */
+  public void setLimit(int limit) {
+    this.limit = limit;
+  }
+
+  /**
+   * @return the detailQuery
+   */
+  public boolean isDetailQuery() {
+    return detailQuery;
+  }
+
+  /**
+   * @param detailQuery the detailQuery to set
+   */
+  public void setDetailQuery(boolean detailQuery) {
+    this.detailQuery = detailQuery;
+  }
+
+  public String getQueryId() {
+    return queryId;
+  }
+
+  public void setQueryId(String queryId) {
+    this.queryId = queryId;
+  }
+
+  public String getOutLocationPath() {
+    return outLocationPath;
+  }
+
+  public void setOutLocationPath(String outLocationPath) {
+    this.outLocationPath = outLocationPath;
+  }
+
+  public boolean isCountStarQuery() {
+    return isCountStartQuery;
+  }
+
+  public void setCountStartQuery(boolean isCountStartQuery) {
+    this.isCountStartQuery = isCountStartQuery;
+  }
+
+  public List<QueryDimension> getSortedDimemsions() {
+    return sortedDimensions;
+  }
+
+  public void setSortedDimemsions(List<QueryDimension> dims) {
+    this.sortedDimensions = dims;
+  }
+
+  public boolean isRawDetailQuery() {
+    return rawDetailQuery;
+  }
+
+  public void setRawDetailQuery(boolean rawDetailQuery) {
+    this.rawDetailQuery = rawDetailQuery;
+  }
+}
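A small construction sketch (column names are illustrative):

    CarbonQueryPlan plan = new CarbonQueryPlan("default", "employee");
    QueryDimension department = new QueryDimension("department_name");
    department.setQueryOrder(0);
    plan.addDimension(department);
    QueryMeasure salary = new QueryMeasure("salary");
    salary.setQueryOrder(1);
    plan.addMeasure(salary);
    plan.setLimit(100);
    plan.setDetailQuery(true);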

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/model/QueryColumn.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/model/QueryColumn.java b/core/src/main/java/org/apache/carbondata/scan/model/QueryColumn.java
new file mode 100644
index 0000000..0aa5266
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/model/QueryColumn.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.model;
+
+import java.io.Serializable;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+/**
+ * query column, which holds information about a column
+ */
+public class QueryColumn implements Serializable {
+
+  /**
+   * serialVersionUID
+   */
+  private static final long serialVersionUID = -4222306600480181084L;
+
+  /**
+   * name of the column
+   */
+  protected String columnName;
+
+  /**
+   * sort order in which the column output will be sorted; by default it is none
+   */
+  private SortOrderType sortOrder = SortOrderType.NONE;
+
+  /**
+   * query order in which the result of the query will be sent
+   */
+  private int queryOrder;
+
+  /**
+   * aggregation function applied on column
+   */
+
+  private String aggregationFunction = CarbonCommonConstants.DUMMY;
+
+  public QueryColumn(String columnName) {
+    this.columnName = columnName;
+  }
+
+  /**
+   * @return the sortOrder
+   */
+  public SortOrderType getSortOrder() {
+    return sortOrder;
+  }
+
+  /**
+   * @param sortOrder the sortOrder to set
+   */
+  public void setSortOrder(SortOrderType sortOrder) {
+    this.sortOrder = sortOrder;
+  }
+
+  /**
+   * @return the columnName
+   */
+  public String getColumnName() {
+    return columnName;
+  }
+
+  /**
+   * @return the queryOrder
+   */
+  public int getQueryOrder() {
+    return queryOrder;
+  }
+
+  /**
+   * @param queryOrder the queryOrder to set
+   */
+  public void setQueryOrder(int queryOrder) {
+    this.queryOrder = queryOrder;
+  }
+
+  /**
+   * @return the aggregationFunction
+   */
+  public String getAggregateFunction() {
+    return aggregationFunction;
+  }
+
+  /**
+   * @param aggregationFunction the aggregationFunction to set
+   */
+  public void setAggregateFunction(String aggregationFunction) {
+    this.aggregationFunction = aggregationFunction;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/model/QueryDimension.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/model/QueryDimension.java b/core/src/main/java/org/apache/carbondata/scan/model/QueryDimension.java
new file mode 100644
index 0000000..1f1f2cb
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/model/QueryDimension.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.model;
+
+import java.io.Serializable;
+
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+
+/**
+ * query plan dimension which holds the information about the dimension;
+ * the actual CarbonDimension is kept transient to avoid heavy object serialization
+ */
+public class QueryDimension extends QueryColumn implements Serializable {
+
+  /**
+   * serialVersionUID
+   */
+  private static final long serialVersionUID = -8492704093776645651L;
+  /**
+   * actual dimension column
+   */
+  private transient CarbonDimension dimension;
+
+  public QueryDimension(String columnName) {
+    super(columnName);
+  }
+
+  /**
+   * @return the dimension
+   */
+  public CarbonDimension getDimension() {
+    return dimension;
+  }
+
+  /**
+   * @param dimension the dimension to set
+   */
+  public void setDimension(CarbonDimension dimension) {
+    this.dimension = dimension;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/model/QueryMeasure.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/model/QueryMeasure.java b/core/src/main/java/org/apache/carbondata/scan/model/QueryMeasure.java
new file mode 100644
index 0000000..0ea84c7
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/model/QueryMeasure.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.model;
+
+import java.io.Serializable;
+
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
+
+/**
+ * query plan measure; this class holds the information about a measure
+ * present in the query, keeping the actual CarbonMeasure transient to avoid
+ * serializing the heavy object
+ */
+public class QueryMeasure extends QueryColumn implements Serializable {
+
+  /**
+   * serialVersionUID
+   */
+  private static final long serialVersionUID = 1035512411375495414L;
+
+  /**
+   * actual carbon measure object
+   */
+  private transient CarbonMeasure measure;
+
+  public QueryMeasure(String columnName) {
+    super(columnName);
+  }
+
+  /**
+   * @return the measure
+   */
+  public CarbonMeasure getMeasure() {
+    return measure;
+  }
+
+  /**
+   * @param measure the measure to set
+   */
+  public void setMeasure(CarbonMeasure measure) {
+    this.measure = measure;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/model/QueryModel.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/model/QueryModel.java b/core/src/main/java/org/apache/carbondata/scan/model/QueryModel.java
new file mode 100644
index 0000000..81eb728
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/model/QueryModel.java
@@ -0,0 +1,507 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.model;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.TableBlockInfo;
+import org.apache.carbondata.core.carbon.metadata.schema.table.CarbonTable;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonColumn;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
+import org.apache.carbondata.core.carbon.querystatistics.QueryStatisticsRecorder;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.scan.expression.ColumnExpression;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.UnknownExpression;
+import org.apache.carbondata.scan.expression.conditional.ConditionalExpression;
+import org.apache.carbondata.scan.filter.resolver.FilterResolverIntf;
+
+/**
+ * Query model which holds all the details
+ * about the query. It is sent from the driver to the executors
+ * and is referred to while executing the query.
+ */
+public class QueryModel implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = -4674677234007089052L;
+  /**
+   * mapping from column name to its dictionary, used to decode
+   * dictionary-encoded dimension values
+   */
+  public transient Map<String, Dictionary> columnToDictionaryMapping;
+  /**
+   * Number of records to keep in memory.
+   */
+  public int inMemoryRecordSize;
+  /**
+   * list of dimensions selected in the query
+   */
+  private List<QueryDimension> queryDimension;
+  /**
+   * list of dimensions on which sorting is applied
+   */
+  private List<QueryDimension> sortDimension;
+  /**
+   * list of measures selected in the query
+   */
+  private List<QueryMeasure> queryMeasures;
+  /**
+   * query id
+   */
+  private String queryId;
+  /**
+   * to check if it is an aggregate table
+   */
+  private boolean isAggTable;
+  /**
+   * filter tree
+   */
+  private FilterResolverIntf filterExpressionResolverTree;
+  /**
+   * in case of a limit query we need to know how many
+   * records will be passed from the executor
+   */
+  private int limit;
+
+  /**
+   * to check if it is a count(*) query, as processing will be different
+   */
+  private boolean isCountStarQuery;
+  /**
+   * to check whether aggregation is required during query execution
+   */
+  private boolean detailQuery;
+  /**
+   * table block information in which query will be executed
+   */
+  private List<TableBlockInfo> tableBlockInfos;
+  /**
+   * order in which each sort dimension will be sorted
+   */
+  private byte[] sortOrder;
+  /**
+   * absolute table identifier
+   */
+  private AbsoluteTableIdentifier absoluteTableIdentifier;
+  /**
+   * in case of a detail query with sort we spill to disk;
+   * this location will be used to write the temp files
+   */
+  private String queryTempLocation;
+  /**
+   * To handle most of the computation in query engines like spark and hive, carbon should give
+   * raw detailed records to it.
+   */
+  private boolean forcedDetailRawQuery;
+  /**
+   * partition column list
+   */
+  private List<String> paritionColumns;
+  /**
+   * table on which query will be executed
+   * TODO need to remove this and pass only the path
+   * and carbon metadata will load the table from metadata file
+   */
+  private CarbonTable table;
+
+  private QueryStatisticsRecorder statisticsRecorder;
+
+  public QueryModel() {
+    tableBlockInfos = new ArrayList<TableBlockInfo>();
+    queryDimension = new ArrayList<QueryDimension>();
+    queryMeasures = new ArrayList<QueryMeasure>();
+    sortDimension = new ArrayList<QueryDimension>();
+    sortOrder = new byte[0];
+    paritionColumns = new ArrayList<String>();
+  }
+
+  public static QueryModel createModel(AbsoluteTableIdentifier absoluteTableIdentifier,
+      CarbonQueryPlan queryPlan, CarbonTable carbonTable) {
+    QueryModel queryModel = new QueryModel();
+    String factTableName = carbonTable.getFactTableName();
+    queryModel.setAbsoluteTableIdentifier(absoluteTableIdentifier);
+
+    fillQueryModel(queryPlan, carbonTable, queryModel, factTableName);
+
+    queryModel.setLimit(queryPlan.getLimit());
+    queryModel.setDetailQuery(queryPlan.isDetailQuery());
+    queryModel.setForcedDetailRawQuery(queryPlan.isRawDetailQuery());
+    queryModel.setQueryId(queryPlan.getQueryId());
+    queryModel.setQueryTempLocation(queryPlan.getOutLocationPath());
+    return queryModel;
+  }
+
+  private static void fillQueryModel(CarbonQueryPlan queryPlan, CarbonTable carbonTable,
+      QueryModel queryModel, String factTableName) {
+    queryModel.setAbsoluteTableIdentifier(carbonTable.getAbsoluteTableIdentifier());
+    queryModel.setQueryDimension(queryPlan.getDimensions());
+    fillSortInfoInModel(queryModel, queryPlan.getSortedDimemsions());
+    queryModel.setQueryMeasures(queryPlan.getMeasures());
+    if (null != queryPlan.getFilterExpression()) {
+      processFilterExpression(queryPlan.getFilterExpression(),
+          carbonTable.getDimensionByTableName(factTableName),
+          carbonTable.getMeasureByTableName(factTableName));
+    }
+    queryModel.setCountStarQuery(queryPlan.isCountStarQuery());
+    //TODO need to remove this code, and executor will load the table
+    // from file metadata
+    queryModel.setTable(carbonTable);
+  }
+
+  private static void fillSortInfoInModel(QueryModel executorModel,
+      List<QueryDimension> sortedDims) {
+    if (null != sortedDims) {
+      byte[] sortOrderByteArray = new byte[sortedDims.size()];
+      int i = 0;
+      for (QueryColumn mdim : sortedDims) {
+        sortOrderByteArray[i++] = (byte) mdim.getSortOrder().ordinal();
+      }
+      executorModel.setSortOrder(sortOrderByteArray);
+      executorModel.setSortDimension(sortedDims);
+    } else {
+      executorModel.setSortOrder(new byte[0]);
+      executorModel.setSortDimension(new ArrayList<QueryDimension>(0));
+    }
+
+  }
+
+  public static void processFilterExpression(
+      Expression filterExpression, List<CarbonDimension> dimensions, List<CarbonMeasure> measures) {
+    if (null != filterExpression) {
+      if (null != filterExpression.getChildren() && filterExpression.getChildren().size() == 0) {
+        if (filterExpression instanceof ConditionalExpression) {
+          List<ColumnExpression> listOfCol =
+              ((ConditionalExpression) filterExpression).getColumnList();
+          for (ColumnExpression expression : listOfCol) {
+            setDimAndMsrColumnNode(dimensions, measures, expression);
+          }
+
+        }
+      }
+      // guard against leaf expressions with null children, which would
+      // otherwise throw a NullPointerException in the loop below
+      if (null == filterExpression.getChildren()) {
+        return;
+      }
+      for (Expression expression : filterExpression.getChildren()) {
+
+        if (expression instanceof ColumnExpression) {
+          setDimAndMsrColumnNode(dimensions, measures, (ColumnExpression) expression);
+        } else if (expression instanceof UnknownExpression) {
+          UnknownExpression exp = ((UnknownExpression) expression);
+          List<ColumnExpression> listOfColExpression = exp.getAllColumnList();
+          for (ColumnExpression col : listOfColExpression) {
+            setDimAndMsrColumnNode(dimensions, measures, col);
+          }
+        } else {
+          processFilterExpression(expression, dimensions, measures);
+        }
+      }
+    }
+
+  }
+
+  private static CarbonMeasure getCarbonMetadataMeasure(String name, List<CarbonMeasure> measures) {
+    for (CarbonMeasure measure : measures) {
+      if (measure.getColName().equalsIgnoreCase(name)) {
+        return measure;
+      }
+    }
+    return null;
+  }
+
+  private static void setDimAndMsrColumnNode(List<CarbonDimension> dimensions,
+      List<CarbonMeasure> measures, ColumnExpression col) {
+    CarbonDimension dim;
+    CarbonMeasure msr;
+    String columnName;
+    columnName = col.getColumnName();
+    dim = CarbonUtil.findDimension(dimensions, columnName);
+    col.setCarbonColumn(dim);
+    col.setDimension(dim);
+    col.setDimension(true);
+    if (null == dim) {
+      msr = getCarbonMetadataMeasure(columnName, measures);
+      col.setCarbonColumn(msr);
+      col.setDimension(false);
+    }
+  }
+
+  /**
+   * It gets the projection columns
+   */
+  public CarbonColumn[] getProjectionColumns() {
+    CarbonColumn[] carbonColumns =
+        new CarbonColumn[getQueryDimension().size() + getQueryMeasures().size()];
+    for (QueryDimension dimension : getQueryDimension()) {
+      carbonColumns[dimension.getQueryOrder()] = dimension.getDimension();
+    }
+    for (QueryMeasure msr : getQueryMeasures()) {
+      carbonColumns[msr.getQueryOrder()] = msr.getMeasure();
+    }
+    return carbonColumns;
+  }
+
+  /**
+   * @return the queryDimension
+   */
+  public List<QueryDimension> getQueryDimension() {
+    return queryDimension;
+  }
+
+  /**
+   * @param queryDimension the queryDimension to set
+   */
+  public void setQueryDimension(List<QueryDimension> queryDimension) {
+    this.queryDimension = queryDimension;
+  }
+
+  /**
+   * @return the queryMeasures
+   */
+  public List<QueryMeasure> getQueryMeasures() {
+    return queryMeasures;
+  }
+
+  /**
+   * @param queryMeasures the queryMeasures to set
+   */
+  public void setQueryMeasures(List<QueryMeasure> queryMeasures) {
+    this.queryMeasures = queryMeasures;
+  }
+
+  /**
+   * @return the queryId
+   */
+  public String getQueryId() {
+    return queryId;
+  }
+
+  /**
+   * @param queryId the queryId to set
+   */
+  public void setQueryId(String queryId) {
+    this.queryId = queryId;
+  }
+
+  /**
+   * @return the isAggTable
+   */
+  public boolean isAggTable() {
+    return isAggTable;
+  }
+
+  /**
+   * @param isAggTable the isAggTable to set
+   */
+  public void setAggTable(boolean isAggTable) {
+    this.isAggTable = isAggTable;
+  }
+
+  /**
+   * @return the limit
+   */
+  public int getLimit() {
+    return limit;
+  }
+
+  /**
+   * @param limit the limit to set
+   */
+  public void setLimit(int limit) {
+    this.limit = limit;
+  }
+
+  /**
+   * @return the isCountStarQuery
+   */
+  public boolean isCountStarQuery() {
+    return isCountStarQuery;
+  }
+
+  /**
+   * @param isCountStarQuery the isCountStarQuery to set
+   */
+  public void setCountStarQuery(boolean isCountStarQuery) {
+    this.isCountStarQuery = isCountStarQuery;
+  }
+
+  /**
+   * @return the detailQuery
+   */
+  public boolean isDetailQuery() {
+    return detailQuery;
+  }
+
+  public void setDetailQuery(boolean detailQuery) {
+    this.detailQuery = detailQuery;
+  }
+
+  /**
+   * @return the tableBlockInfos
+   */
+  public List<TableBlockInfo> getTableBlockInfos() {
+    return tableBlockInfos;
+  }
+
+  /**
+   * @param tableBlockInfos the tableBlockInfos to set
+   */
+  public void setTableBlockInfos(List<TableBlockInfo> tableBlockInfos) {
+    this.tableBlockInfos = tableBlockInfos;
+  }
+
+  /**
+   * @return the queryTempLocation
+   */
+  public String getQueryTempLocation() {
+    return queryTempLocation;
+  }
+
+  /**
+   * @param queryTempLocation the queryTempLocation to set
+   */
+  public void setQueryTempLocation(String queryTempLocation) {
+    this.queryTempLocation = queryTempLocation;
+  }
+
+  /**
+   * @return the sortOrder
+   */
+  public byte[] getSortOrder() {
+    return sortOrder;
+  }
+
+  /**
+   * @param sortOrder the sortOrder to set
+   */
+  public void setSortOrder(byte[] sortOrder) {
+    this.sortOrder = sortOrder;
+  }
+
+  /**
+   * @return the sortDimension
+   */
+  public List<QueryDimension> getSortDimension() {
+    return sortDimension;
+  }
+
+  /**
+   * @param sortDimension the sortDimension to set
+   */
+  public void setSortDimension(List<QueryDimension> sortDimension) {
+    this.sortDimension = sortDimension;
+  }
+
+  /**
+   * @return the filterEvaluatorTree
+   */
+  public FilterResolverIntf getFilterExpressionResolverTree() {
+    return filterExpressionResolverTree;
+  }
+
+  public void setFilterExpressionResolverTree(FilterResolverIntf filterExpressionResolverTree) {
+    this.filterExpressionResolverTree = filterExpressionResolverTree;
+  }
+
+  /**
+   * @return the absoluteTableIdentifier
+   */
+  public AbsoluteTableIdentifier getAbsoluteTableIdentifier() {
+    return absoluteTableIdentifier;
+  }
+
+  /**
+   * @param absoluteTableIdentifier the absoluteTableIdentifier to set
+   */
+  public void setAbsoluteTableIdentifier(AbsoluteTableIdentifier absoluteTableIdentifier) {
+    this.absoluteTableIdentifier = absoluteTableIdentifier;
+  }
+
+  /**
+   * @return the paritionColumns
+   */
+  public List<String> getParitionColumns() {
+    return paritionColumns;
+  }
+
+  /**
+   * @param paritionColumns the paritionColumns to set
+   */
+  public void setParitionColumns(List<String> paritionColumns) {
+    this.paritionColumns = paritionColumns;
+  }
+
+  /**
+   * @return the table
+   */
+  public CarbonTable getTable() {
+    return table;
+  }
+
+  /**
+   * @param table the table to set
+   */
+  public void setTable(CarbonTable table) {
+    this.table = table;
+  }
+
+  public boolean isForcedDetailRawQuery() {
+    return forcedDetailRawQuery;
+  }
+
+  public void setForcedDetailRawQuery(boolean forcedDetailRawQuery) {
+    this.forcedDetailRawQuery = forcedDetailRawQuery;
+  }
+
+  /**
+   * @return the columnToDictionaryMapping
+   */
+  public Map<String, Dictionary> getColumnToDictionaryMapping() {
+    return columnToDictionaryMapping;
+  }
+
+  /**
+   * @param columnToDictionaryMapping
+   */
+  public void setColumnToDictionaryMapping(Map<String, Dictionary> columnToDictionaryMapping) {
+    this.columnToDictionaryMapping = columnToDictionaryMapping;
+  }
+
+  public int getInMemoryRecordSize() {
+    return inMemoryRecordSize;
+  }
+
+  public void setInMemoryRecordSize(int inMemoryRecordSize) {
+    this.inMemoryRecordSize = inMemoryRecordSize;
+  }
+
+  public QueryStatisticsRecorder getStatisticsRecorder() {
+    return statisticsRecorder;
+  }
+
+  public void setStatisticsRecorder(QueryStatisticsRecorder statisticsRecorder) {
+    this.statisticsRecorder = statisticsRecorder;
+  }
+}
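
A short usage sketch, assuming the classes from this patch are on the
classpath: createModel(...) normally fills a QueryModel from a
CarbonQueryPlan, but the public setters above also allow assembling one
directly. The column names are illustrative.

    import java.util.Arrays;

    import org.apache.carbondata.scan.model.QueryDimension;
    import org.apache.carbondata.scan.model.QueryMeasure;
    import org.apache.carbondata.scan.model.QueryModel;

    public final class QueryModelSketch {

      public static QueryModel detailQuery() {
        QueryModel model = new QueryModel();
        // projection: one dimension and one measure
        model.setQueryDimension(Arrays.asList(new QueryDimension("country")));
        model.setQueryMeasures(Arrays.asList(new QueryMeasure("sales")));
        model.setLimit(1000);        // limit pushed down from the driver
        model.setDetailQuery(true);  // raw rows, no aggregation on executor
        return model;
      }
    }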

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/model/QuerySchemaInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/model/QuerySchemaInfo.java b/core/src/main/java/org/apache/carbondata/scan/model/QuerySchemaInfo.java
new file mode 100644
index 0000000..185609f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/model/QuerySchemaInfo.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.model;
+
+import java.io.Serializable;
+
+import org.apache.carbondata.core.keygenerator.KeyGenerator;
+
+public class QuerySchemaInfo implements Serializable {
+
+  private int[] maskedByteIndexes;
+
+  private KeyGenerator keyGenerator;
+
+  private QueryDimension[] queryDimensions;
+
+  private QueryMeasure[] queryMeasures;
+
+  private int[] queryOrder;
+
+  private int[] queryReverseOrder;
+
+  public int[] getMaskedByteIndexes() {
+    return maskedByteIndexes;
+  }
+
+  public void setMaskedByteIndexes(int[] maskedByteIndexes) {
+    this.maskedByteIndexes = maskedByteIndexes;
+  }
+
+  public KeyGenerator getKeyGenerator() {
+    return keyGenerator;
+  }
+
+  public void setKeyGenerator(KeyGenerator keyGenerator) {
+    this.keyGenerator = keyGenerator;
+  }
+
+  public QueryDimension[] getQueryDimensions() {
+    return queryDimensions;
+  }
+
+  public void setQueryDimensions(QueryDimension[] queryDimensions) {
+    this.queryDimensions = queryDimensions;
+  }
+
+  public QueryMeasure[] getQueryMeasures() {
+    return queryMeasures;
+  }
+
+  public void setQueryMeasures(QueryMeasure[] queryMeasures) {
+    this.queryMeasures = queryMeasures;
+  }
+
+  public int[] getQueryOrder() {
+    return queryOrder;
+  }
+
+  public void setQueryOrder(int[] queryOrder) {
+    this.queryOrder = queryOrder;
+  }
+
+  public int[] getQueryReverseOrder() {
+    return queryReverseOrder;
+  }
+
+  public void setQueryReverseOrder(int[] queryReverseOrder) {
+    this.queryReverseOrder = queryReverseOrder;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/model/SortOrderType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/model/SortOrderType.java b/core/src/main/java/org/apache/carbondata/scan/model/SortOrderType.java
new file mode 100644
index 0000000..ba725b7
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/model/SortOrderType.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.model;
+
+/**
+ * enum for sorting the columns
+ */
+public enum SortOrderType {
+
+    /**
+     * Ascending order
+     */
+    ASC(0),
+
+    /**
+     * Descending order.
+     */
+    DSC(1),
+
+    /**
+     * No order mentioned
+     */
+    NONE(-1);
+  /**
+   * Order type in numeric
+   */
+  private int orderType;
+
+  SortOrderType(int orderType) {
+    this.orderType = orderType;
+  }
+
+  /**
+   * Order type in number
+   *
+   * @return orderType int
+   */
+  public int getOrderType() {
+    return orderType;
+  }
+}
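
Note that fillSortInfoInModel(...) in QueryModel above encodes the sort order
via ordinal() (ASC=0, DSC=1, NONE=2), not via getOrderType() (which maps NONE
to -1). A self-contained sketch of that one-byte-per-dimension encoding:

    import org.apache.carbondata.scan.model.SortOrderType;

    public final class SortOrderEncodingSketch {

      // One byte per sort dimension, as stored via QueryModel.setSortOrder(byte[]).
      public static byte[] encode(SortOrderType[] orders) {
        byte[] sortOrder = new byte[orders.length];
        for (int i = 0; i < orders.length; i++) {
          sortOrder[i] = (byte) orders[i].ordinal();
        }
        return sortOrder;
      }
    }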

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/processor/AbstractDataBlockIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/processor/AbstractDataBlockIterator.java b/core/src/main/java/org/apache/carbondata/scan/processor/AbstractDataBlockIterator.java
new file mode 100644
index 0000000..c6bc32c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/processor/AbstractDataBlockIterator.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.processor;
+
+import java.util.List;
+
+import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.datastore.DataRefNode;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.scan.collector.ScannedResultCollector;
+import org.apache.carbondata.scan.collector.impl.DictionaryBasedResultCollector;
+import org.apache.carbondata.scan.collector.impl.RawBasedResultCollector;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.result.AbstractScannedResult;
+import org.apache.carbondata.scan.scanner.BlockletScanner;
+import org.apache.carbondata.scan.scanner.impl.FilterScanner;
+import org.apache.carbondata.scan.scanner.impl.NonFilterScanner;
+
+/**
+ * This abstract class provides a skeletal implementation of the
+ * Block iterator.
+ */
+public abstract class AbstractDataBlockIterator extends CarbonIterator<List<Object[]>> {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(AbstractDataBlockIterator.class.getName());
+  /**
+   * iterator which will be used to iterate over data blocks
+   */
+  protected CarbonIterator<DataRefNode> dataBlockIterator;
+
+  /**
+   * execution details
+   */
+  protected BlockExecutionInfo blockExecutionInfo;
+
+  /**
+   * result collector which will be used to aggregate the scanned result
+   */
+  protected ScannedResultCollector scannerResultAggregator;
+
+  /**
+   * scanner which will be used to process the blocklet;
+   * processing can be filter based or non-filter based
+   */
+  protected BlockletScanner blockletScanner;
+
+  /**
+   * to hold the data block
+   */
+  protected BlocksChunkHolder blocksChunkHolder;
+
+  /**
+   * batch size of result
+   */
+  protected int batchSize;
+
+  protected AbstractScannedResult scannedResult;
+
+  public AbstractDataBlockIterator(BlockExecutionInfo blockExecutionInfo,
+      FileHolder fileReader, int batchSize) {
+    this.blockExecutionInfo = blockExecutionInfo;
+    dataBlockIterator = new BlockletIterator(blockExecutionInfo.getFirstDataBlock(),
+        blockExecutionInfo.getNumberOfBlockToScan());
+    blocksChunkHolder = new BlocksChunkHolder(blockExecutionInfo.getTotalNumberDimensionBlock(),
+        blockExecutionInfo.getTotalNumberOfMeasureBlock());
+    blocksChunkHolder.setFileReader(fileReader);
+
+    if (blockExecutionInfo.getFilterExecuterTree() != null) {
+      blockletScanner = new FilterScanner(blockExecutionInfo);
+    } else {
+      blockletScanner = new NonFilterScanner(blockExecutionInfo);
+    }
+    if (blockExecutionInfo.isRawRecordDetailQuery()) {
+      this.scannerResultAggregator =
+          new RawBasedResultCollector(blockExecutionInfo);
+    } else {
+      this.scannerResultAggregator =
+          new DictionaryBasedResultCollector(blockExecutionInfo);
+    }
+    this.batchSize = batchSize;
+  }
+
+  public boolean hasNext() {
+    if (scannedResult != null && scannedResult.hasNext()) {
+      return true;
+    } else {
+      return dataBlockIterator.hasNext();
+    }
+  }
+
+  protected boolean updateScanner() {
+    try {
+      if (scannedResult != null && scannedResult.hasNext()) {
+        return true;
+      } else {
+        scannedResult = getNextScannedResult();
+        while (scannedResult != null) {
+          if (scannedResult.hasNext()) {
+            return true;
+          }
+          scannedResult = getNextScannedResult();
+        }
+        return false;
+      }
+    } catch (QueryExecutionException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  private AbstractScannedResult getNextScannedResult() throws QueryExecutionException {
+    if (dataBlockIterator.hasNext()) {
+      blocksChunkHolder.setDataBlock(dataBlockIterator.next());
+      blocksChunkHolder.reset();
+      return blockletScanner.scanBlocklet(blocksChunkHolder);
+    }
+    return null;
+  }
+
+
+}
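
The core of this class is the "drain the current scanned result, otherwise
advance to the next blocklet" loop in updateScanner(). A self-contained
sketch of that pattern over plain java.util iterators (the real hasNext()
also consults dataBlockIterator.hasNext() optimistically, which is
simplified here):

    import java.util.Iterator;

    public final class DrainThenAdvanceSketch<T> {

      private final Iterator<Iterator<T>> blocklets; // each blocklet yields rows
      private Iterator<T> current;

      public DrainThenAdvanceSketch(Iterator<Iterator<T>> blocklets) {
        this.blocklets = blocklets;
      }

      public boolean hasNext() {
        // keep advancing until a blocklet with remaining rows is found
        while (current == null || !current.hasNext()) {
          if (!blocklets.hasNext()) {
            return false;
          }
          current = blocklets.next(); // like getNextScannedResult()
        }
        return true;
      }

      public T next() {
        return current.next();
      }
    }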

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/processor/BlockletIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/processor/BlockletIterator.java b/core/src/main/java/org/apache/carbondata/scan/processor/BlockletIterator.java
new file mode 100644
index 0000000..73ac1be
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/processor/BlockletIterator.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.processor;
+
+import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.core.carbon.datastore.DataRefNode;
+
+/**
+ * Below class will be used to iterate over data blocks
+ */
+public class BlockletIterator extends CarbonIterator<DataRefNode> {
+  /**
+   * data store block
+   */
+  protected DataRefNode datablock;
+  /**
+   * block counter to keep track of how many blocks have been processed
+   */
+  private int blockCounter;
+
+  /**
+   * flag used to check whether any more data blocks are present or not
+   */
+  private boolean hasNext = true;
+
+  /**
+   * total number of blocks assigned to this iterator
+   */
+  private long totalNumberOfBlocksToScan;
+
+  /**
+   * Constructor
+   *
+   * @param datablock                 first data block
+   * @param totalNumberOfBlocksToScan total number of blocks to be scanned
+   */
+  public BlockletIterator(DataRefNode datablock, long totalNumberOfBlocksToScan) {
+    this.datablock = datablock;
+    this.totalNumberOfBlocksToScan = totalNumberOfBlocksToScan;
+  }
+
+  /**
+   * whether all the blocks assigned to this iterator have been processed
+   */
+  @Override public boolean hasNext() {
+    return hasNext;
+  }
+
+  /**
+   * To get the next block
+   *
+   * @return next data block
+   */
+  @Override public DataRefNode next() {
+    // get the current block
+    DataRefNode datablockTemp = datablock;
+    // store the next data block
+    datablock = datablock.getNextDataRefNode();
+    // increment the counter
+    blockCounter++;
+    // set the hasNext flag to false if all the data blocks
+    // have been processed, or if the number of blocks assigned
+    // to this iterator has been processed
+    if (null == datablock || blockCounter >= this.totalNumberOfBlocksToScan) {
+      hasNext = false;
+    }
+    return datablockTemp;
+  }
+}
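
The traversal above is a plain linked-list walk bounded by the number of
blocks assigned to the iterator. A stripped-down, self-contained sketch of
the same logic, with a hypothetical Node standing in for DataRefNode:

    public final class NodeChainSketch {

      static final class Node {
        final int id;
        Node next;

        Node(int id) {
          this.id = id;
        }
      }

      // Walk at most blocksToScan nodes, like BlockletIterator.next().
      public static void walk(Node first, long blocksToScan) {
        Node current = first;
        long counter = 0;
        while (current != null && counter < blocksToScan) {
          System.out.println("scan block " + current.id);
          current = current.next; // same role as getNextDataRefNode()
          counter++;
        }
      }
    }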

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/processor/BlocksChunkHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/processor/BlocksChunkHolder.java b/core/src/main/java/org/apache/carbondata/scan/processor/BlocksChunkHolder.java
new file mode 100644
index 0000000..47cac0c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/processor/BlocksChunkHolder.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.processor;
+
+import org.apache.carbondata.core.carbon.datastore.DataRefNode;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+/**
+ * Block chunk holder which will hold the dimension and
+ * measure chunks
+ */
+public class BlocksChunkHolder {
+
+  /**
+   * dimension column data chunk
+   */
+  private DimensionColumnDataChunk[] dimensionDataChunk;
+
+  /**
+   * measure column data chunk
+   */
+  private MeasureColumnDataChunk[] measureDataChunk;
+
+  /**
+   * file reader which will be used to read the block from file
+   */
+  private FileHolder fileReader;
+
+  /**
+   * data block
+   */
+  private DataRefNode dataBlock;
+
+  public BlocksChunkHolder(int numberOfDimensionBlock, int numberOfMeasureBlock) {
+    dimensionDataChunk = new DimensionColumnDataChunk[numberOfDimensionBlock];
+    measureDataChunk = new MeasureColumnDataChunk[numberOfMeasureBlock];
+  }
+
+  /**
+   * @return the dimensionDataChunk
+   */
+  public DimensionColumnDataChunk[] getDimensionDataChunk() {
+    return dimensionDataChunk;
+  }
+
+  /**
+   * @param dimensionDataChunk the dimensionDataChunk to set
+   */
+  public void setDimensionDataChunk(DimensionColumnDataChunk[] dimensionDataChunk) {
+    this.dimensionDataChunk = dimensionDataChunk;
+  }
+
+  /**
+   * @return the measureDataChunk
+   */
+  public MeasureColumnDataChunk[] getMeasureDataChunk() {
+    return measureDataChunk;
+  }
+
+  /**
+   * @param measureDataChunk the measureDataChunk to set
+   */
+  public void setMeasureDataChunk(MeasureColumnDataChunk[] measureDataChunk) {
+    this.measureDataChunk = measureDataChunk;
+  }
+
+  /**
+   * @return the fileReader
+   */
+  public FileHolder getFileReader() {
+    return fileReader;
+  }
+
+  /**
+   * @param fileReader the fileReader to set
+   */
+  public void setFileReader(FileHolder fileReader) {
+    this.fileReader = fileReader;
+  }
+
+  /**
+   * @return the dataBlock
+   */
+  public DataRefNode getDataBlock() {
+    return dataBlock;
+  }
+
+  /**
+   * @param dataBlock the dataBlock to set
+   */
+  public void setDataBlock(DataRefNode dataBlock) {
+    this.dataBlock = dataBlock;
+  }
+
+  /**
+   * To reset the measure chunk and dimension chunk
+   * arrays
+   */
+  public void reset() {
+    for (int i = 0; i < measureDataChunk.length; i++) {
+      this.measureDataChunk[i] = null;
+    }
+    for (int i = 0; i < dimensionDataChunk.length; i++) {
+      this.dimensionDataChunk[i] = null;
+    }
+  }
+}
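
A small sketch of the intended reuse of this holder across the blocklets of
one block, assuming the types from this patch; the reader and block arguments
are placeholders supplied by the caller (compare the constructor of
AbstractDataBlockIterator above):

    import org.apache.carbondata.core.carbon.datastore.DataRefNode;
    import org.apache.carbondata.core.datastorage.store.FileHolder;
    import org.apache.carbondata.scan.processor.BlocksChunkHolder;

    public final class ChunkHolderReuseSketch {

      public static BlocksChunkHolder prepare(FileHolder reader, DataRefNode block,
          int dimensionBlocks, int measureBlocks) {
        BlocksChunkHolder holder = new BlocksChunkHolder(dimensionBlocks, measureBlocks);
        holder.setFileReader(reader);
        holder.setDataBlock(block);
        holder.reset(); // clear chunks left over from the previous blocklet
        return holder;
      }
    }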

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/processor/impl/DataBlockIteratorImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/processor/impl/DataBlockIteratorImpl.java b/core/src/main/java/org/apache/carbondata/scan/processor/impl/DataBlockIteratorImpl.java
new file mode 100644
index 0000000..3bdbf3a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/processor/impl/DataBlockIteratorImpl.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.processor.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.processor.AbstractDataBlockIterator;
+
+/**
+ * Below class will be used to process the block for detail query
+ */
+public class DataBlockIteratorImpl extends AbstractDataBlockIterator {
+
+  /**
+   * DataBlockIteratorImpl Constructor
+   *
+   * @param blockExecutionInfo execution information
+   */
+  public DataBlockIteratorImpl(BlockExecutionInfo blockExecutionInfo, FileHolder fileReader,
+      int batchSize) {
+    super(blockExecutionInfo, fileReader, batchSize);
+  }
+
+  /**
+   * It scans the block and returns the result in batches.
+   *
+   * @return result of at most {@code batchSize} rows
+   */
+  public List<Object[]> next() {
+    List<Object[]> collectedResult = null;
+    if (updateScanner()) {
+      collectedResult = this.scannerResultAggregator.collectData(scannedResult, batchSize);
+      while (collectedResult.size() < batchSize && updateScanner()) {
+        List<Object[]> data = this.scannerResultAggregator
+            .collectData(scannedResult, batchSize - collectedResult.size());
+        collectedResult.addAll(data);
+      }
+    } else {
+      collectedResult = new ArrayList<>();
+    }
+    return collectedResult;
+  }
+
+}
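
The batching contract of next() above is: keep collecting until the batch is
full or the scanner runs dry. A self-contained sketch of the same
accumulation over a plain iterator source:

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    public final class BatchCollectSketch {

      public static <T> List<T> nextBatch(Iterator<T> source, int batchSize) {
        List<T> batch = new ArrayList<>(batchSize);
        // collectData(...) fills the remaining shortfall on each pass
        while (batch.size() < batchSize && source.hasNext()) {
          batch.add(source.next());
        }
        return batch;
      }
    }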

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/result/AbstractScannedResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/result/AbstractScannedResult.java b/core/src/main/java/org/apache/carbondata/scan/result/AbstractScannedResult.java
new file mode 100644
index 0000000..1e7b7d8
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/result/AbstractScannedResult.java
@@ -0,0 +1,437 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.result;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.Map;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.executor.infos.KeyStructureInfo;
+import org.apache.carbondata.scan.filter.GenericQueryType;
+
+/**
+ * Scanned result class which will store and provide the result on request
+ */
+public abstract class AbstractScannedResult {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(AbstractScannedResult.class.getName());
+  /**
+   * current row number
+   */
+  protected int currentRow = -1;
+  /**
+   * row mapping indexes
+   */
+  protected int[] rowMapping;
+  /**
+   * key size of the fixed length column
+   */
+  private int fixedLengthKeySize;
+  /**
+   * total number of rows
+   */
+  private int totalNumberOfRows;
+  /**
+   * to keep track of the number of rows processed
+   */
+  private int rowCounter;
+  /**
+   * dimension column data chunk
+   */
+  private DimensionColumnDataChunk[] dataChunks;
+  /**
+   * measure column data chunk
+   */
+  private MeasureColumnDataChunk[] measureDataChunks;
+  /**
+   * dictionary column block index in file
+   */
+  private int[] dictionaryColumnBlockIndexes;
+
+  /**
+   * no dictionary column block index in file
+   */
+  private int[] noDictionaryColumnBlockIndexes;
+
+  /**
+   * column group id to key structure info mapping,
+   * which will be used to get a column's key from the complete
+   * column group key.
+   * For example, if only one dimension of a column group is selected,
+   * the key structure info is used to mask the complete column group key
+   * and get that particular column's key
+   */
+  private Map<Integer, KeyStructureInfo> columnGroupKeyStructureInfo;
+
+  /**
+   * complex parent block index to generic query type mapping
+   */
+  private Map<Integer, GenericQueryType> complexParentIndexToQueryMap;
+
+  private int totalDimensionsSize;
+
+  /**
+   * parent block indexes
+   */
+  private int[] complexParentBlockIndexes;
+
+  public AbstractScannedResult(BlockExecutionInfo blockExecutionInfo) {
+    this.fixedLengthKeySize = blockExecutionInfo.getFixedLengthKeySize();
+    this.noDictionaryColumnBlockIndexes = blockExecutionInfo.getNoDictionaryBlockIndexes();
+    this.dictionaryColumnBlockIndexes = blockExecutionInfo.getDictionaryColumnBlockIndex();
+    this.columnGroupKeyStructureInfo = blockExecutionInfo.getColumnGroupToKeyStructureInfo();
+    this.complexParentIndexToQueryMap = blockExecutionInfo.getComlexDimensionInfoMap();
+    this.complexParentBlockIndexes = blockExecutionInfo.getComplexColumnParentBlockIndexes();
+    this.totalDimensionsSize = blockExecutionInfo.getQueryDimensions().length;
+  }
+
+  /**
+   * Below method will be used to set the dimension chunks
+   * which will be used to create a row
+   *
+   * @param dataChunks dimension chunks used in query
+   */
+  public void setDimensionChunks(DimensionColumnDataChunk[] dataChunks) {
+    this.dataChunks = dataChunks;
+  }
+
+  /**
+   * Below method will be used to set the measure column chunks
+   *
+   * @param measureDataChunks measure data chunks
+   */
+  public void setMeasureChunks(MeasureColumnDataChunk[] measureDataChunks) {
+    this.measureDataChunks = measureDataChunks;
+  }
+
+  /**
+   * Below method will be used to get the chunk based on measure ordinal
+   *
+   * @param ordinal measure ordinal
+   * @return measure column chunk
+   */
+  public MeasureColumnDataChunk getMeasureChunk(int ordinal) {
+    return measureDataChunks[ordinal];
+  }
+
+  /**
+   * Below method will be used to get the key for all the dictionary dimensions
+   * which are present in the query
+   *
+   * @param rowId row id selected after scanning
+   * @return return the dictionary key
+   */
+  protected byte[] getDictionaryKeyArray(int rowId) {
+    byte[] completeKey = new byte[fixedLengthKeySize];
+    int offset = 0;
+    for (int i = 0; i < this.dictionaryColumnBlockIndexes.length; i++) {
+      offset += dataChunks[dictionaryColumnBlockIndexes[i]]
+          .fillChunkData(completeKey, offset, rowId,
+              columnGroupKeyStructureInfo.get(dictionaryColumnBlockIndexes[i]));
+    }
+    rowCounter++;
+    return completeKey;
+  }
+
+  /**
+   * Below method will be used to get the key for all the dictionary dimensions
+   * in integer array format which are present in the query
+   *
+   * @param rowId row id selected after scanning
+   * @return return the dictionary key
+   */
+  protected int[] getDictionaryKeyIntegerArray(int rowId) {
+    int[] completeKey = new int[totalDimensionsSize];
+    int column = 0;
+    for (int i = 0; i < this.dictionaryColumnBlockIndexes.length; i++) {
+      column = dataChunks[dictionaryColumnBlockIndexes[i]]
+          .fillConvertedChunkData(rowId, column, completeKey,
+              columnGroupKeyStructureInfo.get(dictionaryColumnBlockIndexes[i]));
+    }
+    rowCounter++;
+    return completeKey;
+  }
+
+  /**
+   * Just increments the counters in case the query is only on measures.
+   */
+  public void incrementCounter() {
+    rowCounter++;
+    currentRow++;
+  }
+
+  /**
+   * Below method will be used to get the dimension data based on dimension
+   * ordinal and index
+   *
+   * @param dimOrdinal dimension ordinal present in the query
+   * @param rowId      row index
+   * @return dimension data based on row id
+   */
+  protected byte[] getDimensionData(int dimOrdinal, int rowId) {
+    return dataChunks[dimOrdinal].getChunkData(rowId);
+  }
+
+  /**
+   * Below method will be used to get the dimension key array
+   * for all the no dictionary dimensions present in the query
+   *
+   * @param rowId row number
+   * @return no dictionary keys for all no dictionary dimensions
+   */
+  protected byte[][] getNoDictionaryKeyArray(int rowId) {
+    byte[][] noDictionaryColumnsKeys = new byte[noDictionaryColumnBlockIndexes.length][];
+    int position = 0;
+    for (int i = 0; i < this.noDictionaryColumnBlockIndexes.length; i++) {
+      noDictionaryColumnsKeys[position++] =
+          dataChunks[noDictionaryColumnBlockIndexes[i]].getChunkData(rowId);
+    }
+    return noDictionaryColumnsKeys;
+  }
+
+  /**
+   * Below method will be used to get the dimension key string array
+   * for all the no dictionary dimensions present in the query
+   *
+   * @param rowId row number
+   * @return no dictionary key strings for all no dictionary dimensions
+   */
+  protected String[] getNoDictionaryKeyStringArray(int rowId) {
+    String[] noDictionaryColumnsKeys = new String[noDictionaryColumnBlockIndexes.length];
+    int position = 0;
+    for (int i = 0; i < this.noDictionaryColumnBlockIndexes.length; i++) {
+      noDictionaryColumnsKeys[position++] =
+          new String(dataChunks[noDictionaryColumnBlockIndexes[i]].getChunkData(rowId));
+    }
+    return noDictionaryColumnsKeys;
+  }
+
+  /**
+   * Below method will be used to get the complex type keys array based
+   * on row id for all the complex type dimension selected in query
+   *
+   * @param rowId row number
+   * @return complex type key array for all the complex dimension selected in query
+   */
+  protected byte[][] getComplexTypeKeyArray(int rowId) {
+    byte[][] complexTypeData = new byte[complexParentBlockIndexes.length][];
+    for (int i = 0; i < complexTypeData.length; i++) {
+      GenericQueryType genericQueryType =
+          complexParentIndexToQueryMap.get(complexParentBlockIndexes[i]);
+      ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
+      DataOutputStream dataOutput = new DataOutputStream(byteStream);
+      try {
+        genericQueryType.parseBlocksAndReturnComplexColumnByteArray(dataChunks, rowId, dataOutput);
+        complexTypeData[i] = byteStream.toByteArray();
+      } catch (IOException e) {
+        LOGGER.error(e);
+      } finally {
+        CarbonUtil.closeStreams(dataOutput);
+        CarbonUtil.closeStreams(byteStream);
+      }
+    }
+    return complexTypeData;
+  }
+
+  /**
+   * @return return the total number of row after scanning
+   */
+  public int numberOfOutputRows() {
+    return this.totalNumberOfRows;
+  }
+
+  /**
+   * to check whether any more row is present in the result
+   *
+   * @return
+   */
+  public boolean hasNext() {
+    return rowCounter < this.totalNumberOfRows;
+  }
+
+  /**
+   * As this class is a flyweight object, all blocklet scans
+   * of one block reuse the same result object;
+   * the counters therefore need to be reset to zero so that
+   * a new result is read from the beginning
+   */
+  public void reset() {
+    rowCounter = 0;
+    currentRow = -1;
+  }
+
+  /**
+   * @param totalNumberOfRows total number of rows valid after scanning
+   */
+  public void setNumberOfRows(int totalNumberOfRows) {
+    this.totalNumberOfRows = totalNumberOfRows;
+  }
+
+  /**
+   * After applying the filter, a bit set with the valid row indexes is
+   * produced; the below method is used to set those row indexes
+   *
+   * @param indexes
+   */
+  public void setIndexes(int[] indexes) {
+    this.rowMapping = indexes;
+  }
+
+  /**
+   * Below method will be used to check whether measure value is null or not
+   *
+   * @param ordinal  measure ordinal
+   * @param rowIndex row number to be checked
+   * @return whether it is null or not
+   */
+  protected boolean isNullMeasureValue(int ordinal, int rowIndex) {
+    return measureDataChunks[ordinal].getNullValueIndexHolder().getBitSet().get(rowIndex);
+  }
+
+  /**
+   * Below method will be used to get the measure value of
+   * long type
+   *
+   * @param ordinal  measure ordinal
+   * @param rowIndex row number of the measure value
+   * @return measure value of long type
+   */
+  protected long getLongMeasureValue(int ordinal, int rowIndex) {
+    return measureDataChunks[ordinal].getMeasureDataHolder().getReadableLongValueByIndex(rowIndex);
+  }
+
+  /**
+   * Below method will be used to get the measure value of double type
+   *
+   * @param ordinal  measure ordinal
+   * @param rowIndex row number
+   * @return measure value of double type
+   */
+  protected double getDoubleMeasureValue(int ordinal, int rowIndex) {
+    return measureDataChunks[ordinal].getMeasureDataHolder()
+        .getReadableDoubleValueByIndex(rowIndex);
+  }
+
+  /**
+   * Below method will be used to get the measure type of big decimal data type
+   *
+   * @param ordinal  ordinal of the of the measure
+   * @param rowIndex row number
+   * @return measure of big decimal type
+   */
+  protected BigDecimal getBigDecimalMeasureValue(int ordinal, int rowIndex) {
+    return measureDataChunks[ordinal].getMeasureDataHolder()
+        .getReadableBigDecimalValueByIndex(rowIndex);
+  }
+
+  /**
+   * will return the current valid row id
+   *
+   * @return valid row id
+   */
+  public abstract int getCurrenrRowId();
+
+  /**
+   * @return dictionary key array for all the dictionary dimension
+   * selected in query
+   */
+  public abstract byte[] getDictionaryKeyArray();
+
+  /**
+   * @return dictionary key array for all the dictionary dimensions in integer array format
+   * selected in query
+   */
+  public abstract int[] getDictionaryKeyIntegerArray();
+
+  /**
+   * Return the dimension data based on dimension ordinal
+   *
+   * @param dimensionOrdinal dimension ordinal
+   * @return dimension data
+   */
+  public abstract byte[] getDimensionKey(int dimensionOrdinal);
+
+  /**
+   * Below method will be used to get the complex type key array
+   *
+   * @return complex type key array
+   */
+  public abstract byte[][] getComplexTypeKeyArray();
+
+  /**
+   * Below method will be used to get the no dictionary key
+   * array for all the no dictionary dimension selected in query
+   *
+   * @return no dictionary key array for all the no dictionary dimension
+   */
+  public abstract byte[][] getNoDictionaryKeyArray();
+
+  /**
+   * Below method will be used to get the no dictionary key
+   * array in string array format for all the no dictionary dimension selected in query
+   *
+   * @return no dictionary key array for all the no dictionary dimension
+   */
+  public abstract String[] getNoDictionaryKeyStringArray();
+
+  /**
+   * Below method will be used to check whether the measure value
+   * is null for a measure
+   *
+   * @param ordinal measure ordinal
+   * @return is null or not
+   */
+  public abstract boolean isNullMeasureValue(int ordinal);
+
+  /**
+   * Below method will be used to get the measure value for measure
+   * of long data type
+   *
+   * @param ordinal measure ordinal
+   * @return long value of measure
+   */
+  public abstract long getLongMeasureValue(int ordinal);
+
+  /**
+   * Below method will be used to get the value of measure of double
+   * type
+   *
+   * @param ordinal measure ordinal
+   * @return measure value
+   */
+  public abstract double getDoubleMeasureValue(int ordinal);
+
+  /**
+   * Below method will be used to get the data of big decimal type
+   * of a measure
+   *
+   * @param ordinal measure ordinal
+   * @return measure value
+   */
+  public abstract BigDecimal getBigDecimalMeasureValue(int ordinal);
+}
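
getDictionaryKeyArray(int) above assembles one fixed-length key by letting
each dictionary column chunk write its bytes at a running offset. A
self-contained sketch of that assembly (the chunk widths are illustrative; in
the real code fillChunkData(...) returns the number of bytes written):

    public final class KeyAssemblySketch {

      public static byte[] assemble(byte[][] columnBytes, int fixedLengthKeySize) {
        byte[] completeKey = new byte[fixedLengthKeySize];
        int offset = 0;
        for (byte[] column : columnBytes) {
          // each column contributes its bytes at the current offset
          System.arraycopy(column, 0, completeKey, offset, column.length);
          offset += column.length;
        }
        return completeKey;
      }
    }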

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/result/BatchResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/result/BatchResult.java b/core/src/main/java/org/apache/carbondata/scan/result/BatchResult.java
new file mode 100644
index 0000000..9bb9c21
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/result/BatchResult.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.result;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+import org.apache.carbondata.common.CarbonIterator;
+
+/**
+ * Below class holds the query result
+ */
+public class BatchResult extends CarbonIterator<Object[]> {
+
+  /**
+   * list of result rows
+   */
+  protected List<Object[]> rows;
+
+  /**
+   * counter to check whether all the records are processed or not
+   */
+  protected int counter;
+
+  public BatchResult() {
+    this.rows = new ArrayList<>();
+  }
+
+  /**
+   * Below method will be used to get the rows
+   *
+   * @return
+   */
+  public List<Object[]> getRows() {
+    return rows;
+  }
+
+  /**
+   * Below method will be used to set the rows
+   *
+   * @param rows
+   */
+  public void setRows(List<Object[]> rows) {
+    this.rows = rows;
+  }
+
+  /**
+   * This method will return one row at a time based on the counter given.
+   * @param counter
+   * @return
+   */
+  public Object[] getRawRow(int counter) {
+    return rows.get(counter);
+  }
+
+  /**
+   * For getting the total size.
+   * @return
+   */
+  public int getSize() {
+    return rows.size();
+  }
+
+
+  /**
+   * Returns {@code true} if the iteration has more elements.
+   *
+   * @return {@code true} if the iteration has more elements
+   */
+  @Override public boolean hasNext() {
+    return counter < rows.size();
+  }
+
+  /**
+   * Returns the next element in the iteration.
+   *
+   * @return the next element in the iteration
+   */
+  @Override public Object[] next() {
+    if (!hasNext()) {
+      throw new NoSuchElementException();
+    }
+    Object[] row = rows.get(counter);
+    counter++;
+    return row;
+  }
+}
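
A usage sketch for BatchResult, assuming the class above is on the classpath;
the rows are illustrative:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.carbondata.scan.result.BatchResult;

    public final class BatchResultUsageSketch {

      public static void main(String[] args) {
        List<Object[]> rows = Arrays.asList(
            new Object[] { "india", 42L },
            new Object[] { "china", 7L });
        BatchResult batch = new BatchResult();
        batch.setRows(rows);
        while (batch.hasNext()) { // counter-driven iteration
          System.out.println(Arrays.toString(batch.next()));
        }
      }
    }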

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/result/Result.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/result/Result.java b/core/src/main/java/org/apache/carbondata/scan/result/Result.java
new file mode 100644
index 0000000..618af73
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/result/Result.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.result;
+
+import org.apache.carbondata.scan.wrappers.ByteArrayWrapper;
+
+/**
+ * Result interface for storing the result
+ */
+public interface Result<K, V> {
+  /**
+   * Below method will be used to
+   * add the scanned result
+   *
+   * @param result
+   */
+  void addScannedResult(K result);
+
+  /**
+   * Returns {@code true} if the iteration has more elements.
+   *
+   * @return {@code true} if the iteration has more elements
+   */
+  boolean hasNext();
+
+  /**
+   * Below method will return the result key
+   *
+   * @return key
+   */
+  ByteArrayWrapper getKey();
+
+  /**
+   * Below code will return the result value
+   *
+   * @return value
+   */
+  V[] getValue();
+
+  void merge(Result<K, V> otherResult);
+
+  /**
+   * Below method will be used to get the result
+   *
+   * @return
+   */
+  K getResult();
+
+  /**
+   * @return size of the result
+   */
+  int size();
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/result/impl/FilterQueryScannedResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/result/impl/FilterQueryScannedResult.java b/core/src/main/java/org/apache/carbondata/scan/result/impl/FilterQueryScannedResult.java
new file mode 100644
index 0000000..f1e6594
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/result/impl/FilterQueryScannedResult.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.result.impl;
+
+import java.math.BigDecimal;
+
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.result.AbstractScannedResult;
+
+/**
+ * Result provider class in case of filter query.
+ * In case of filter query, data will be sent
+ * based on the filtered row index
+ */
+public class FilterQueryScannedResult extends AbstractScannedResult {
+
+  public FilterQueryScannedResult(BlockExecutionInfo tableBlockExecutionInfos) {
+    super(tableBlockExecutionInfos);
+  }
+
+  /**
+   * @return dictionary key array for all the dictionary dimension
+   * selected in query
+   */
+  @Override public byte[] getDictionaryKeyArray() {
+    ++currentRow;
+    return getDictionaryKeyArray(rowMapping[currentRow]);
+  }
+
+  /**
+   * @return dictionary key integer array for all the dictionary dimension
+   * selected in query
+   */
+  @Override public int[] getDictionaryKeyIntegerArray() {
+    ++currentRow;
+    return getDictionaryKeyIntegerArray(rowMapping[currentRow]);
+  }
+
+  /**
+   * Below method will be used to get the complex type key array
+   *
+   * @return complex type key array
+   */
+  @Override public byte[][] getComplexTypeKeyArray() {
+    return getComplexTypeKeyArray(rowMapping[currentRow]);
+  }
+
+  /**
+   * Below method will be used to get the no dictionary key
+   * array for all the no dictionary dimension selected in query
+   *
+   * @return no dictionary key array for all the no dictionary dimension
+   */
+  @Override public byte[][] getNoDictionaryKeyArray() {
+    return getNoDictionaryKeyArray(rowMapping[currentRow]);
+  }
+
+  /**
+   * Below method will be used to get the no dictionary key
+   * string array for all the no dictionary dimension selected in query
+   *
+   * @return no dictionary key array for all the no dictionary dimension
+   */
+  @Override public String[] getNoDictionaryKeyStringArray() {
+    return getNoDictionaryKeyStringArray(rowMapping[currentRow]);
+  }
+
+  /**
+   * will return the current valid row id
+   *
+   * @return valid row id
+   */
+  @Override public int getCurrenrRowId() {
+    return rowMapping[currentRow];
+  }
+
+  /**
+   * Return the dimension data based on dimension ordinal
+   *
+   * @param dimensionOrdinal dimension ordinal
+   * @return dimension data
+   */
+  @Override public byte[] getDimensionKey(int dimensionOrdinal) {
+    return getDimensionData(dimensionOrdinal, rowMapping[currentRow]);
+  }
+
+  /**
+   * Below method will be used to check whether the measure value
+   * is null for a measure
+   *
+   * @param ordinal measure ordinal
+   * @return is null or not
+   */
+  @Override public boolean isNullMeasureValue(int ordinal) {
+    return isNullMeasureValue(ordinal, rowMapping[currentRow]);
+  }
+
+  /**
+   * Below method will be used to get the measure value for measure
+   * of long data type
+   *
+   * @param ordinal measure ordinal
+   * @return long value of measure
+   */
+  @Override public long getLongMeasureValue(int ordinal) {
+    return getLongMeasureValue(ordinal, rowMapping[currentRow]);
+  }
+
+  /**
+   * Below method will be used to get the value of measure of double
+   * type
+   *
+   * @param ordinal measure ordinal
+   * @return measure value
+   */
+  @Override public double getDoubleMeasureValue(int ordinal) {
+    return getDoubleMeasureValue(ordinal, rowMapping[currentRow]);
+  }
+
+  /**
+   * Below method will be used to get the data of big decimal type
+   * of a measure
+   *
+   * @param ordinal measure ordinal
+   * @return measure value
+   */
+  @Override public BigDecimal getBigDecimalMeasureValue(int ordinal) {
+    return getBigDecimalMeasureValue(ordinal, rowMapping[currentRow]);
+  }
+
+}
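
The pattern to note above is the rowMapping indirection: the logical cursor (currentRow) is first advanced, then translated to the physical row that survived the filter. A small illustrative sketch with hypothetical values, using the field names the overrides above rely on:

    // Illustration only: rowMapping holds the physical indexes of rows that
    // passed the filter, so logical row 0 maps to physical row 2 here.
    int[] rowMapping = {2, 5, 9};
    int currentRow = -1;                       // cursor starts before the first row
    ++currentRow;                              // as getDictionaryKeyArray() does
    int physicalRow = rowMapping[currentRow];  // -> 2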

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/result/impl/NonFilterQueryScannedResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/result/impl/NonFilterQueryScannedResult.java b/core/src/main/java/org/apache/carbondata/scan/result/impl/NonFilterQueryScannedResult.java
new file mode 100644
index 0000000..e08d525
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/result/impl/NonFilterQueryScannedResult.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.result.impl;
+
+import java.math.BigDecimal;
+
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.result.AbstractScannedResult;
+
+/**
+ * Result provider class for a non-filter query.
+ * In case of a no-filter query we need to return
+ * the complete data
+ */
+public class NonFilterQueryScannedResult extends AbstractScannedResult {
+
+  public NonFilterQueryScannedResult(BlockExecutionInfo blockExecutionInfo) {
+    super(blockExecutionInfo);
+  }
+
+  /**
+   * @return dictionary key array for all the dictionary dimension selected in
+   * query
+   */
+  @Override public byte[] getDictionaryKeyArray() {
+    ++currentRow;
+    return getDictionaryKeyArray(currentRow);
+  }
+
+  /**
+   * @return dictionary key integer array for all the dictionary dimension
+   * selected in query
+   */
+  @Override public int[] getDictionaryKeyIntegerArray() {
+    ++currentRow;
+    return getDictionaryKeyIntegerArray(currentRow);
+  }
+
+  /**
+   * Below method will be used to get the complex type key array
+   *
+   * @return complex type key array
+   */
+  @Override public byte[][] getComplexTypeKeyArray() {
+    return getComplexTypeKeyArray(currentRow);
+  }
+
+  /**
+   * Below method will be used to get the no dictionary key array for all the
+   * no dictionary dimension selected in query
+   *
+   * @return no dictionary key array for all the no dictionary dimension
+   */
+  @Override public byte[][] getNoDictionaryKeyArray() {
+    return getNoDictionaryKeyArray(currentRow);
+  }
+
+  /**
+   * Below method will be used to get the no dictionary key
+   * string array for all the no dictionary dimension selected in query
+   *
+   * @return no dictionary key array for all the no dictionary dimension
+   */
+  @Override public String[] getNoDictionaryKeyStringArray() {
+    return getNoDictionaryKeyStringArray(currentRow);
+  }
+
+  /**
+   * will return the current valid row id
+   *
+   * @return valid row id
+   */
+  @Override public int getCurrenrRowId() {
+    return currentRow;
+  }
+
+  /**
+   * Return the dimension data based on dimension ordinal
+   *
+   * @param dimensionOrdinal dimension ordinal
+   * @return dimension data
+   */
+  @Override public byte[] getDimensionKey(int dimensionOrdinal) {
+    return getDimensionData(dimensionOrdinal, currentRow);
+  }
+
+  /**
+   * Below method will be used to check whether the measure value is null
+   * for a measure
+   *
+   * @param ordinal measure ordinal
+   * @return is null or not
+   */
+  @Override public boolean isNullMeasureValue(int ordinal) {
+    return isNullMeasureValue(ordinal, currentRow);
+  }
+
+  /**
+   * Below method will be used to get the measure value for measure of long
+   * data type
+   *
+   * @param ordinal measure ordinal
+   * @return long value of measure
+   */
+  @Override public long getLongMeasureValue(int ordinal) {
+    return getLongMeasureValue(ordinal, currentRow);
+  }
+
+  /**
+   * Below method will be used to get the value of measure of double type
+   *
+   * @param ordinal measure ordinal
+   * @return measure value
+   */
+  @Override public double getDoubleMeasureValue(int ordinal) {
+    return getDoubleMeasureValue(ordinal, currentRow);
+  }
+
+  /**
+   * Below method will be used to get the data of big decimal type of a
+   * measure
+   *
+   * @param ordinal measure ordinal
+   * @return measure value
+   */
+  @Override public BigDecimal getBigDecimalMeasureValue(int ordinal) {
+    return getBigDecimalMeasureValue(ordinal, currentRow);
+  }
+
+}
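
Compared with FilterQueryScannedResult, the only difference is the missing indirection; here the cursor itself is the physical row id. In sketch form:

    // filter query:     physical index = rowMapping[currentRow]
    // non-filter query: physical index = currentRow
    // so a non-filter scan visits every stored row in order.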


[11/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/CarbonUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/CarbonUtil.java b/core/src/main/java/org/carbondata/core/util/CarbonUtil.java
deleted file mode 100644
index eb8a0fc..0000000
--- a/core/src/main/java/org/carbondata/core/util/CarbonUtil.java
+++ /dev/null
@@ -1,1426 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.util;
-
-import java.io.Closeable;
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.cache.dictionary.Dictionary;
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.TableBlockInfo;
-import org.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
-import org.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
-import org.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
-import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
-import org.carbondata.core.carbon.path.CarbonStorePath;
-import org.carbondata.core.carbon.path.CarbonTablePath;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.columnar.ColumnGroupModel;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreDataHolder;
-import org.carbondata.core.datastorage.store.columnar.UnBlockIndexer;
-import org.carbondata.core.datastorage.store.compression.MeasureMetaDataModel;
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.datastorage.store.filesystem.CarbonFile;
-import org.carbondata.core.datastorage.store.filesystem.CarbonFileFilter;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-import org.carbondata.core.keygenerator.mdkey.NumberCompressor;
-import org.carbondata.core.metadata.ValueEncoderMeta;
-import org.carbondata.scan.model.QueryDimension;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.pentaho.di.core.exception.KettleException;
-
-
-public final class CarbonUtil {
-
-  public static final String HDFS_PREFIX = "hdfs://";
-  public static final String VIEWFS_PREFIX = "viewfs://";
-  private static final String FS_DEFAULT_FS = "fs.defaultFS";
-
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(CarbonUtil.class.getName());
-
-  /**
-   * EIGHT
-   */
-  private static final int CONST_EIGHT = 8;
-
-  /**
-   * SEVEN
-   */
-  private static final int CONST_SEVEN = 7;
-
-  /**
-   * HUNDRED
-   */
-  private static final int CONST_HUNDRED = 100;
-
-  private static final Configuration conf = new Configuration(true);
-
-  private CarbonUtil() {
-
-  }
-
-  /**
-   * This method closes the streams
-   *
-   * @param streams - streams to close.
-   */
-  public static void closeStreams(Closeable... streams) {
-    // Added if to avoid NullPointerException in case one stream is being passed as null
-    if (null != streams) {
-      for (Closeable stream : streams) {
-        if (null != stream) {
-          try {
-            stream.close();
-          } catch (IOException e) {
-            LOGGER.error("Error while closing stream" + stream);
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * @param baseStorePath
-   * @return
-   */
-  private static int createBaseStoreFolders(String baseStorePath) {
-    FileFactory.FileType fileType = FileFactory.getFileType(baseStorePath);
-    try {
-      if (!FileFactory.isFileExist(baseStorePath, fileType, false)) {
-        if (!FileFactory.mkdirs(baseStorePath, fileType)) {
-          return -1;
-        }
-      }
-    } catch (Exception e) {
-      return -1;
-    }
-    return 1;
-  }
-
-  /**
-   * This method checks whether the Restructure Folder exists or not,
-   * and if it does not exist returns the number with which the folder needs to be created.
-   *
-   * @param baseStorePath -
-   *                      base location where the folder will be created.
-   * @return counter
-   * counter with which the folder will be created.
-   */
-  public static int checkAndReturnCurrentRestructFolderNumber(String baseStorePath,
-      final String filterType, final boolean isDirectory) {
-    if (null == baseStorePath || 0 == baseStorePath.length()) {
-      return -1;
-    }
-    // normalize backslashes to forward slashes
-    baseStorePath = baseStorePath.replace("\\", "/");
-
-    // check if the string ends with / and if so remove it.
-    if (baseStorePath.charAt(baseStorePath.length() - 1) == '/') {
-      baseStorePath = baseStorePath.substring(0, baseStorePath.lastIndexOf("/"));
-    }
-    int retValue = createBaseStoreFolders(baseStorePath);
-    if (-1 == retValue) {
-      return retValue;
-    }
-
-    CarbonFile carbonFile =
-        FileFactory.getCarbonFile(baseStorePath, FileFactory.getFileType(baseStorePath));
-
-    // List of directories
-    CarbonFile[] listFiles = carbonFile.listFiles(new CarbonFileFilter() {
-      @Override public boolean accept(CarbonFile pathname) {
-        if (isDirectory && pathname.isDirectory()) {
-          if (pathname.getAbsolutePath().indexOf(filterType) > -1) {
-            return true;
-          }
-        } else {
-          if (pathname.getAbsolutePath().indexOf(filterType) > -1) {
-            return true;
-          }
-        }
-
-        return false;
-      }
-    });
-
-    int counter = -1;
-
-    // if no folder exists then return -1
-    if (listFiles.length == 0) {
-      return counter;
-    }
-
-    counter = findCounterValue(filterType, listFiles, counter);
-    return counter;
-  }
-
-  public static int checkAndReturnCurrentLoadFolderNumber(String baseStorePath) {
-    return checkAndReturnCurrentRestructFolderNumber(baseStorePath, "Load_", true);
-  }
-
-  /**
-   * @param filterType
-   * @param listFiles
-   * @param counter
-   * @return
-   */
-  private static int findCounterValue(final String filterType, CarbonFile[] listFiles,
-      int counter) {
-    if ("Load_".equals(filterType)) {
-      for (CarbonFile files : listFiles) {
-        String folderName = getFolderName(files);
-        if (folderName.indexOf('.') > -1) {
-          folderName = folderName.substring(0, folderName.indexOf('.'));
-        }
-        String[] split = folderName.split("_");
-
-        if (split.length > 1 && counter < Integer.parseInt(split[1])) {
-          counter = Integer.parseInt(split[1]);
-        }
-      }
-    } else {
-      // Iterate list of Directories and find the counter value
-      for (CarbonFile eachFile : listFiles) {
-        String folderName = getFolderName(eachFile);
-        String[] split = folderName.split("_");
-        if (counter < Integer.parseInt(split[1])) {
-          counter = Integer.parseInt(split[1]);
-        }
-      }
-    }
-    return counter;
-  }
-
-  /**
-   * @param eachFile
-   * @return
-   */
-  private static String getFolderName(CarbonFile eachFile) {
-    String str = eachFile.getAbsolutePath();
-    str = str.replace("\\", "/");
-    int firstFolderIndex = str.lastIndexOf("/");
-    String folderName = str.substring(firstFolderIndex);
-    return folderName;
-  }
-
-  /**
-   * This method will be used to update the dimension cardinality
-   *
-   * @param dimCardinality
-   * @return new increment cardinality
-   */
-  public static int[] getIncrementedCardinality(int[] dimCardinality) {
-    // get the cardinality incr factor
-    final int incrValue = CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL;
-
-    int perIncr = 0;
-    int remainder = 0;
-    int[] newDimsC = new int[dimCardinality.length];
-    for (int i = 0; i < dimCardinality.length; i++) {
-      // get the incr
-      perIncr = (dimCardinality[i] * incrValue) / CONST_HUNDRED;
-
-      // if per incr is greater than zero then add it to the cardinality
-      if (perIncr > 0) {
-        newDimsC[i] = dimCardinality[i] + perIncr;
-      } else {
-        // else add one
-        newDimsC[i] = dimCardinality[i] + 1;
-      }
-      // check whether it is at the boundary condition
-      remainder = newDimsC[i] % CONST_EIGHT;
-      if (remainder == CONST_SEVEN) {
-        // then incr cardinality by 1
-        newDimsC[i] = dimCardinality[i] + 1;
-      }
-    }
-    // get the log bits of cardinality
-    for (int i = 0; i < newDimsC.length; i++) {
-      newDimsC[i] = Long.toBinaryString(newDimsC[i]).length();
-    }
-    return newDimsC;
-  }
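
A worked example of the increment rule above, under the assumption that CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL is 10 (i.e. a 10% bump):

    // dimCardinality[i] = 100
    // perIncr   = 100 * 10 / 100 = 10       -> newDimsC[i] = 110
    // 110 % 8   = 6 (not 7)                 -> no boundary adjustment
    // Long.toBinaryString(110) = "1101110"  -> 7 bits reserved for this column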
-
-  public static int getIncrementedCardinality(int dimCardinality) {
-    // get the cardinality incr factor
-    final int incrValue = CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL;
-
-    int perIncr = 0;
-    int remainder = 0;
-    int newDimsC = 0;
-
-    // get the incr
-    perIncr = (dimCardinality * incrValue) / CONST_HUNDRED;
-
-    // if per incr is greater than zero then add it to the cardinality
-    if (perIncr > 0) {
-      newDimsC = dimCardinality + perIncr;
-    } else {
-      // else add one
-      newDimsC = dimCardinality + 1;
-    }
-    // check whether it is at the boundary condition
-    remainder = newDimsC % CONST_EIGHT;
-    if (remainder == CONST_SEVEN) {
-      // then incr cardinality by 1
-      newDimsC = dimCardinality + 1;
-    }
-    // get the log bits of cardinality
-    newDimsC = Long.toBinaryString(newDimsC).length();
-
-    return newDimsC;
-  }
-
-  /**
-   * return ColumnGroupModel. check ColumnGroupModel for detail
-   *
-   * @param columnGroups : column groups
-   * @return ColumnGroupModel  model
-   */
-  public static ColumnGroupModel getColGroupModel(int[][] columnGroups) {
-    int[] columnSplit = new int[columnGroups.length];
-    int noOfColumnStore = columnSplit.length;
-    boolean[] columnarStore = new boolean[noOfColumnStore];
-
-    for (int i = 0; i < columnGroups.length; i++) {
-      columnSplit[i] = columnGroups[i].length;
-      columnarStore[i] = columnGroups[i].length <= 1;
-    }
-    ColumnGroupModel colGroupModel = new ColumnGroupModel();
-    colGroupModel.setNoOfColumnStore(noOfColumnStore);
-    colGroupModel.setColumnSplit(columnSplit);
-    colGroupModel.setColumnarStore(columnarStore);
-    colGroupModel.setColumnGroup(columnGroups);
-    return colGroupModel;
-  }
-
-  /**
-   * This method will be used to update the dimension cardinality
-   *
-   * @param dimCardinality
-   * @return new increment cardinality
-   */
-  public static int[] getIncrementedCardinalityFullyFilled(int[] dimCardinality) {
-    int[] newDimsC = new int[dimCardinality.length];
-    // get the log bits of cardinality
-    for (int i = 0; i < dimCardinality.length; i++) {
-      if (dimCardinality[i] == 0) {
-        //Array or struct type may have higher value
-        newDimsC[i] = 64;
-      } else {
-        int bitsLength = Long.toBinaryString(dimCardinality[i]).length();
-        int div = bitsLength / 8;
-        int mod = bitsLength % 8;
-        if (mod > 0) {
-          newDimsC[i] = 8 * (div + 1);
-        } else {
-          newDimsC[i] = bitsLength;
-        }
-      }
-    }
-    return newDimsC;
-  }
-
-  private static int getBitLengthFullyFilled(int dimlens) {
-    int bitsLength = Long.toBinaryString(dimlens).length();
-    int div = bitsLength / 8;
-    int mod = bitsLength % 8;
-    if (mod > 0) {
-      return 8 * (div + 1);
-    } else {
-      return bitsLength;
-    }
-  }
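
A quick check of the byte-alignment ("fully filled") rule above:

    // cardinality 300 -> binary "100101100" (9 bits) -> rounded up to 16 (2 bytes)
    // cardinality 255 -> binary "11111111"  (8 bits) -> stays 8 (exact byte)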
-
-  /**
-   * This method will be used to delete the folder and files
-   *
-   * @param path file path array
-   * @throws Exception exception
-   */
-  public static void deleteFoldersAndFiles(final File... path) throws CarbonUtilException {
-    try {
-      UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
-
-        @Override public Void run() throws Exception {
-          for (int i = 0; i < path.length; i++) {
-            deleteRecursive(path[i]);
-          }
-          return null;
-        }
-      });
-    } catch (IOException e) {
-      throw new CarbonUtilException("Error while deleting the folders and files");
-    } catch (InterruptedException e) {
-      throw new CarbonUtilException("Error while deleting the folders and files");
-    }
-
-  }
-
-  /**
-   * Recursively delete the files
-   *
-   * @param f File to be deleted
-   * @throws CarbonUtilException
-   */
-  private static void deleteRecursive(File f) throws CarbonUtilException {
-    if (f.isDirectory()) {
-      if (f.listFiles() != null) {
-        for (File c : f.listFiles()) {
-          deleteRecursive(c);
-        }
-      }
-    }
-    if (f.exists() && !f.delete()) {
-      throw new CarbonUtilException("Error while deleting the folders and files");
-    }
-  }
-
-  public static void deleteFoldersAndFiles(final CarbonFile... file) throws CarbonUtilException {
-    try {
-      UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
-
-        @Override public Void run() throws Exception {
-          for (int i = 0; i < file.length; i++) {
-            deleteRecursive(file[i]);
-          }
-          return null;
-        }
-      });
-    } catch (IOException e) {
-      throw new CarbonUtilException("Error while deleting the folders and files");
-    } catch (InterruptedException e) {
-      throw new CarbonUtilException("Error while deleting the folders and files");
-    }
-  }
-
-  public static String getBadLogPath(String storeLocation) {
-    String badLogStoreLocation =
-            CarbonProperties.getInstance().getProperty(CarbonCommonConstants.CARBON_BADRECORDS_LOC);
-    badLogStoreLocation = badLogStoreLocation + File.separator + storeLocation;
-
-    return badLogStoreLocation;
-  }
-
-  public static void deleteFoldersAndFilesSilent(final CarbonFile... file)
-      throws CarbonUtilException {
-    try {
-      UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
-
-        @Override public Void run() throws Exception {
-          for (int i = 0; i < file.length; i++) {
-            deleteRecursiveSilent(file[i]);
-          }
-          return null;
-        }
-      });
-    } catch (IOException e) {
-      throw new CarbonUtilException("Error while deleting the folders and files");
-    } catch (InterruptedException e) {
-      throw new CarbonUtilException("Error while deleting the folders and files");
-    }
-  }
-
-  /**
-   * This function will rename the table to be deleted
-   *
-   * @param partitionCount
-   * @param storePath
-   * @param databaseName
-   * @param tableName
-   */
-  public static void renameTableForDeletion(int partitionCount, String storePath,
-      String databaseName, String tableName) {
-    String tableNameWithPartition = "";
-    String databaseNameWithPartition = "";
-    String fullPath = "";
-    String newFilePath = "";
-    String newFileName = "";
-    Callable<Void> c = null;
-    long time = System.currentTimeMillis();
-    FileFactory.FileType fileType = null;
-    ExecutorService executorService = Executors.newFixedThreadPool(10);
-    for (int i = 0; i < partitionCount; i++) {
-      databaseNameWithPartition = databaseName + '_' + i;
-      tableNameWithPartition = tableName + '_' + i;
-      newFileName = tableNameWithPartition + '_' + time;
-      fullPath = storePath + File.separator + databaseNameWithPartition + File.separator
-          + tableNameWithPartition;
-      newFilePath =
-          storePath + File.separator + databaseNameWithPartition + File.separator + newFileName;
-      fileType = FileFactory.getFileType(fullPath);
-      try {
-        if (FileFactory.isFileExist(fullPath, fileType)) {
-          CarbonFile file = FileFactory.getCarbonFile(fullPath, fileType);
-          boolean isRenameSuccessfull = file.renameTo(newFilePath);
-          if (!isRenameSuccessfull) {
-            LOGGER.error("Problem renaming the table :: " + fullPath);
-            c = new DeleteFolderAndFiles(file);
-            executorService.submit(c);
-          } else {
-            c = new DeleteFolderAndFiles(FileFactory.getCarbonFile(newFilePath, fileType));
-            executorService.submit(c);
-          }
-        }
-      } catch (IOException e) {
-        LOGGER.error("Problem renaming the table :: " + fullPath);
-      }
-    }
-    executorService.shutdown();
-  }
-
-  /**
-   * Recursively delete the files
-   *
-   * @param f File to be deleted
-   * @throws CarbonUtilException
-   */
-  private static void deleteRecursive(CarbonFile f) throws CarbonUtilException {
-    if (f.isDirectory()) {
-      if (f.listFiles() != null) {
-        for (CarbonFile c : f.listFiles()) {
-          deleteRecursive(c);
-        }
-      }
-    }
-    if (f.exists() && !f.delete()) {
-      throw new CarbonUtilException("Error while deleting the folders and files");
-    }
-  }
-
-  private static void deleteRecursiveSilent(CarbonFile f) throws CarbonUtilException {
-    if (f.isDirectory()) {
-      if (f.listFiles() != null) {
-        for (CarbonFile c : f.listFiles()) {
-          deleteRecursiveSilent(c);
-        }
-      }
-    }
-    if (f.exists() && !f.delete()) {
-      return;
-    }
-  }
-
-  public static void deleteFiles(File[] intermediateFiles) throws CarbonUtilException {
-    for (int i = 0; i < intermediateFiles.length; i++) {
-      if (!intermediateFiles[i].delete()) {
-        throw new CarbonUtilException("Problem while deleting intermediate file");
-      }
-    }
-  }
-
-  public static byte[] getKeyArray(ColumnarKeyStoreDataHolder[] columnarKeyStoreDataHolder,
-      int totalKeySize, int eachKeySize) {
-    byte[] completeKeyArray = new byte[totalKeySize];
-    byte[] keyBlockData = null;
-    int destinationPosition = 0;
-    int[] columnIndex = null;
-    int blockKeySize = 0;
-    for (int i = 0; i < columnarKeyStoreDataHolder.length; i++) {
-      keyBlockData = columnarKeyStoreDataHolder[i].getKeyBlockData();
-      blockKeySize = columnarKeyStoreDataHolder[i].getColumnarKeyStoreMetadata().getEachRowSize();
-      if (columnarKeyStoreDataHolder[i].getColumnarKeyStoreMetadata().isSorted()) {
-        for (int j = 0; j < keyBlockData.length; j += blockKeySize) {
-          System.arraycopy(keyBlockData, j, completeKeyArray, destinationPosition, blockKeySize);
-          destinationPosition += eachKeySize;
-        }
-      } else {
-        columnIndex = columnarKeyStoreDataHolder[i].getColumnarKeyStoreMetadata().getColumnIndex();
-
-        for (int j = 0; j < columnIndex.length; j++) {
-          System.arraycopy(keyBlockData, columnIndex[j] * blockKeySize, completeKeyArray,
-              eachKeySize * columnIndex[j] + destinationPosition, blockKeySize);
-        }
-      }
-      destinationPosition = blockKeySize;
-    }
-    return completeKeyArray;
-  }
-
-  public static byte[] getKeyArray(ColumnarKeyStoreDataHolder[] columnarKeyStoreDataHolder,
-      int totalKeySize, int eachKeySize, short[] columnIndex) {
-    byte[] completeKeyArray = new byte[totalKeySize];
-    byte[] keyBlockData = null;
-    int destinationPosition = 0;
-    int blockKeySize = 0;
-    for (int i = 0; i < columnarKeyStoreDataHolder.length; i++) {
-      keyBlockData = columnarKeyStoreDataHolder[i].getKeyBlockData();
-      blockKeySize = columnarKeyStoreDataHolder[i].getColumnarKeyStoreMetadata().getEachRowSize();
-
-      for (int j = 0; j < columnIndex.length; j++) {
-        System.arraycopy(keyBlockData, columnIndex[j] * blockKeySize, completeKeyArray,
-            destinationPosition, blockKeySize);
-        destinationPosition += eachKeySize;
-      }
-      destinationPosition = blockKeySize;
-    }
-    return completeKeyArray;
-  }
-
-  public static int getFirstIndexUsingBinarySearch(FixedLengthDimensionDataChunk dimColumnDataChunk,
-      int low, int high, byte[] compareValue, boolean matchUpLimit) {
-    int cmpResult = 0;
-    while (high >= low) {
-      int mid = (low + high) / 2;
-      cmpResult = ByteUtil.UnsafeComparer.INSTANCE
-          .compareTo(dimColumnDataChunk.getCompleteDataChunk(), mid * compareValue.length,
-              compareValue.length, compareValue, 0, compareValue.length);
-      if (cmpResult < 0) {
-        low = mid + 1;
-      } else if (cmpResult > 0) {
-        high = mid - 1;
-      } else {
-        int currentIndex = mid;
-        if(!matchUpLimit) {
-          while (currentIndex - 1 >= 0 && ByteUtil.UnsafeComparer.INSTANCE
-              .compareTo(dimColumnDataChunk.getCompleteDataChunk(),
-                  (currentIndex - 1) * compareValue.length, compareValue.length, compareValue, 0,
-                  compareValue.length) == 0) {
-            --currentIndex;
-          }
-        } else {
-          while (currentIndex + 1 <= high && ByteUtil.UnsafeComparer.INSTANCE
-              .compareTo(dimColumnDataChunk.getCompleteDataChunk(),
-                  (currentIndex + 1) * compareValue.length, compareValue.length, compareValue, 0,
-                  compareValue.length) == 0) {
-            currentIndex++;
-          }
-        }
-        return currentIndex;
-      }
-    }
-    return -(low + 1);
-  }
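
As an aside, the search logic can be sketched in isolation: a binary search over a flat byte[] of fixed-length keys, followed by a walk to the first duplicate. The standalone version below is illustrative only; it assumes Java 9+ for java.util.Arrays.compareUnsigned and stands in for the FixedLengthDimensionDataChunk and ByteUtil types used above:

    static int firstIndexOf(byte[] data, int keyLen, byte[] key, int low, int high) {
      while (high >= low) {
        int mid = (low + high) >>> 1;
        int cmp = java.util.Arrays.compareUnsigned(data, mid * keyLen,
            mid * keyLen + keyLen, key, 0, keyLen);
        if (cmp < 0) {
          low = mid + 1;
        } else if (cmp > 0) {
          high = mid - 1;
        } else {
          // walk left to the first occurrence, as the matchUpLimit == false branch does
          while (mid > 0 && java.util.Arrays.compareUnsigned(data, (mid - 1) * keyLen,
              mid * keyLen, key, 0, keyLen) == 0) {
            mid--;
          }
          return mid;
        }
      }
      return -(low + 1);  // not found: insertion point, encoded as in the method above
    }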
-
-  /**
-   * Method will identify the value which is less than the pivot element
-   * on which the range filter has been applied.
-   *
-   * @param currentIndex
-   * @param dimColumnDataChunk
-   * @param compareValue
-   * @return index value
-   */
-  public static int nextLesserValueToTarget(int currentIndex,
-      FixedLengthDimensionDataChunk dimColumnDataChunk, byte[] compareValue) {
-    while (currentIndex - 1 >= 0 && ByteUtil.UnsafeComparer.INSTANCE
-        .compareTo(dimColumnDataChunk.getCompleteDataChunk(),
-            (currentIndex - 1) * compareValue.length, compareValue.length, compareValue, 0,
-            compareValue.length) >= 0) {
-      --currentIndex;
-    }
-
-    return --currentIndex;
-  }
-
-  /**
-   * Method will identify the value which is greater than the pivot element
-   * on which the range filter has been applied.
-   *
-   * @param currentIndex
-   * @param dimColumnDataChunk
-   * @param compareValue
-   * @param numberOfRows
-   * @return index value
-   */
-  public static int nextGreaterValueToTarget(int currentIndex,
-      FixedLengthDimensionDataChunk dimColumnDataChunk, byte[] compareValue, int numberOfRows) {
-    while (currentIndex + 1 < numberOfRows && ByteUtil.UnsafeComparer.INSTANCE
-        .compareTo(dimColumnDataChunk.getCompleteDataChunk(),
-            (currentIndex + 1) * compareValue.length, compareValue.length, compareValue, 0,
-            compareValue.length) <= 0) {
-      ++currentIndex;
-    }
-
-    return ++currentIndex;
-  }
-
-  public static int[] getUnCompressColumnIndex(int totalLength, byte[] columnIndexData,
-      NumberCompressor numberCompressor) {
-    ByteBuffer buffer = ByteBuffer.wrap(columnIndexData);
-    buffer.rewind();
-    int indexDataLength = buffer.getInt();
-    byte[] indexData = new byte[indexDataLength];
-    byte[] indexMap =
-        new byte[totalLength - indexDataLength - CarbonCommonConstants.INT_SIZE_IN_BYTE];
-    buffer.get(indexData);
-    buffer.get(indexMap);
-    return UnBlockIndexer.uncompressIndex(numberCompressor.unCompress(indexData),
-        numberCompressor.unCompress(indexMap));
-  }
-
-  /**
-   * Convert int array to Integer list
-   *
-   * @param array
-   * @return List<Integer>
-   */
-  public static List<Integer> convertToIntegerList(int[] array) {
-    List<Integer> integers = new ArrayList<Integer>();
-    for (int i = 0; i < array.length; i++) {
-      integers.add(array[i]);
-    }
-    return integers;
-  }
-
-  /**
-   * Read level metadata file and return cardinality
-   *
-   * @param levelPath
-   * @return
-   * @throws CarbonUtilException
-   */
-  public static int[] getCardinalityFromLevelMetadataFile(String levelPath)
-      throws CarbonUtilException {
-    DataInputStream dataInputStream = null;
-    int[] cardinality = null;
-
-    try {
-      if (FileFactory.isFileExist(levelPath, FileFactory.getFileType(levelPath))) {
-        dataInputStream =
-            FileFactory.getDataInputStream(levelPath, FileFactory.getFileType(levelPath));
-
-        cardinality = new int[dataInputStream.readInt()];
-
-        for (int i = 0; i < cardinality.length; i++) {
-          cardinality[i] = dataInputStream.readInt();
-        }
-      }
-    } catch (FileNotFoundException e) {
-      throw new CarbonUtilException("Problem while getting the file", e);
-    } catch (IOException e) {
-      throw new CarbonUtilException("Problem while reading the file", e);
-    } finally {
-      closeStreams(dataInputStream);
-    }
-
-    return cardinality;
-  }
-
-  public static void writeLevelCardinalityFile(String loadFolderLoc, String tableName,
-      int[] dimCardinality) throws KettleException {
-    String levelCardinalityFilePath = loadFolderLoc + File.separator +
-        CarbonCommonConstants.LEVEL_METADATA_FILE + tableName
-        + CarbonCommonConstants.CARBON_METADATA_EXTENSION;
-    FileOutputStream fileOutputStream = null;
-    FileChannel channel = null;
-    try {
-      int dimCardinalityArrLength = dimCardinality.length;
-
-      // first four bytes for writing the length of array, remaining for array data
-      ByteBuffer buffer = ByteBuffer.allocate(CarbonCommonConstants.INT_SIZE_IN_BYTE
-          + dimCardinalityArrLength * CarbonCommonConstants.INT_SIZE_IN_BYTE);
-
-      fileOutputStream = new FileOutputStream(levelCardinalityFilePath);
-      channel = fileOutputStream.getChannel();
-      buffer.putInt(dimCardinalityArrLength);
-
-      for (int i = 0; i < dimCardinalityArrLength; i++) {
-        buffer.putInt(dimCardinality[i]);
-      }
-
-      buffer.flip();
-      channel.write(buffer);
-      buffer.clear();
-
-      LOGGER.info("Level cardinality file written to : " + levelCardinalityFilePath);
-    } catch (IOException e) {
-      LOGGER.error("Error while writing level cardinality file : " + levelCardinalityFilePath + e
-          .getMessage());
-      throw new KettleException("Not able to write level cardinality file", e);
-    } finally {
-      closeStreams(channel, fileOutputStream);
-    }
-  }
-
-  /**
-   * From beeline, if a delimiter is passed as \001, in code we get it as the
-   * escaped string \\001. So this method will unescape the slash again and
-   * convert it back to \001
-   *
-   * @param parseStr
-   * @return
-   */
-  public static String unescapeChar(String parseStr) {
-    switch (parseStr) {
-      case "\\001":
-        return "\001";
-      case "\\t":
-        return "\t";
-      case "\\r":
-        return "\r";
-      case "\\b":
-        return "\b";
-      case "\\f":
-        return "\f";
-      case "\\n":
-        return "\n";
-      default:
-        return parseStr;
-    }
-  }
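
An example of the round trip the javadoc describes:

    // A delimiter typed as \001 in beeline arrives here as the escaped string "\\001".
    String soh = unescapeChar("\\001");  // returns "\001" (the real SOH control character)
    String tab = unescapeChar("\\t");    // returns a real tab character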
-
-  public static String escapeComplexDelimiterChar(String parseStr) {
-    switch (parseStr) {
-      case "$":
-        return "\\$";
-      case ":":
-        return "\\:";
-      default:
-        return parseStr;
-    }
-  }
-
-  /**
-   * Append HDFS Base Url for show create & load data sql
-   *
-   * @param filePath
-   */
-  public static String checkAndAppendHDFSUrl(String filePath) {
-    String currentPath = filePath;
-    if (null != filePath && filePath.length() != 0 &&
-        FileFactory.getFileType(filePath) != FileFactory.FileType.HDFS &&
-        FileFactory.getFileType(filePath) != FileFactory.FileType.VIEWFS) {
-      String baseDFSUrl = CarbonProperties.getInstance()
-          .getProperty(CarbonCommonConstants.CARBON_DDL_BASE_HDFS_URL);
-      if (null != baseDFSUrl) {
-        String dfsUrl = conf.get(FS_DEFAULT_FS);
-        if (null != dfsUrl && (dfsUrl.startsWith(HDFS_PREFIX) || dfsUrl
-            .startsWith(VIEWFS_PREFIX))) {
-          baseDFSUrl = dfsUrl + baseDFSUrl;
-        }
-        if (baseDFSUrl.endsWith("/")) {
-          baseDFSUrl = baseDFSUrl.substring(0, baseDFSUrl.length() - 1);
-        }
-        if (!filePath.startsWith("/")) {
-          filePath = "/" + filePath;
-        }
-        currentPath = baseDFSUrl + filePath;
-      }
-    }
-    return currentPath;
-  }
-
-  /**
-   * @param location
-   * @param factTableName
-   * @return
-   */
-  public static int getRestructureNumber(String location, String factTableName) {
-    String restructName =
-        location.substring(location.indexOf(CarbonCommonConstants.RESTRUCTRE_FOLDER));
-    int factTableIndex = restructName.indexOf(factTableName) - 1;
-    String restructNumber =
-        restructName.substring(CarbonCommonConstants.RESTRUCTRE_FOLDER.length(), factTableIndex);
-    return Integer.parseInt(restructNumber);
-  }
-
-  /**
-   * Below method will be used to get the aggregator type
-   * CarbonCommonConstants.SUM_COUNT_VALUE_MEASURE will be returned when the value is a double measure
-   * CarbonCommonConstants.BYTE_VALUE_MEASURE will be returned when value is byte array
-   *
-   * @param agg
-   * @return aggregator type
-   */
-  public static char getType(String agg) {
-    if (CarbonCommonConstants.SUM.equals(agg) || CarbonCommonConstants.COUNT.equals(agg)) {
-      return CarbonCommonConstants.SUM_COUNT_VALUE_MEASURE;
-    } else {
-      return CarbonCommonConstants.BYTE_VALUE_MEASURE;
-    }
-  }
-
-  public static String getCarbonStorePath(String databaseName, String tableName) {
-    CarbonProperties prop = CarbonProperties.getInstance();
-    if (null == prop) {
-      return null;
-    }
-    String basePath = prop.getProperty(CarbonCommonConstants.STORE_LOCATION,
-        CarbonCommonConstants.STORE_LOCATION_DEFAULT_VAL);
-    return basePath;
-  }
-
-  /**
-   * This method will check the existence of a file at a given path
-   */
-  public static boolean isFileExists(String fileName) {
-    try {
-      FileFactory.FileType fileType = FileFactory.getFileType(fileName);
-      if (FileFactory.isFileExist(fileName, fileType)) {
-        return true;
-      }
-    } catch (IOException e) {
-      LOGGER.error("@@@@@@  File not found at a given location @@@@@@ : " + fileName);
-    }
-    return false;
-  }
-
-  /**
-   * This method will check and create the given path
-   */
-  public static boolean checkAndCreateFolder(String path) {
-    boolean created = false;
-    try {
-      FileFactory.FileType fileType = FileFactory.getFileType(path);
-      if (FileFactory.isFileExist(path, fileType)) {
-        created = true;
-      } else {
-        created = FileFactory.mkdirs(path, fileType);
-      }
-    } catch (IOException e) {
-      LOGGER.error(e.getMessage());
-    }
-    return created;
-  }
-
-  /**
-   * This method will return the size of a given file
-   */
-  public static long getFileSize(String filePath) {
-    FileFactory.FileType fileType = FileFactory.getFileType(filePath);
-    CarbonFile carbonFile = FileFactory.getCarbonFile(filePath, fileType);
-    return carbonFile.getSize();
-  }
-
-  /**
-   * This method will be used to get the bit length of the dimensions based on the
-   * dimension partitioner. If the partitioner value is 1, the column
-   * cardinality will be incremented so that it fits at byte level.
-   * For example, if the number of bits required to store one column value is 3,
-   * then 8 bits will be assigned to each value of that column. In this way
-   * we may waste some bits (at most 7). If the partitioner value is more than
-   * 1, then a few columns are stored together, so the cardinality of that group
-   * will be incremented to fit at byte level. For example: if the cardinality of 3
-   * columns stored together is [1,1,1], then the number of bits required will be
-   * [1,1,1]; the last value is then incremented and it becomes [1,1,6]
-   *
-   * @param dimCardinality cardinality of each column
-   * @param dimPartitioner Partitioner is how column is stored if value is 1 then column
-   *                       wise if value is more than 1 then it is in group with other
-   *                       column
-   * @return number of bits for each column
-   * @TODO for a row group only the last value is incremented, which is a problem
-   * when the last column in that group is selected most of the time in
-   * filter queries: comparisons will concentrate on it. If it were incremented
-   * uniformly, the comparisons would be distributed
-   */
-  public static int[] getDimensionBitLength(int[] dimCardinality, int[] dimPartitioner) {
-    int[] bitLength = new int[dimCardinality.length];
-    int dimCounter = 0;
-    for (int i = 0; i < dimPartitioner.length; i++) {
-      if (dimPartitioner[i] == 1) {
-        // for columnar store
-        // fully filled bits means complete byte or number of bits
-        // assigned will be in
-        // multiplication of 8
-        bitLength[dimCounter] = getBitLengthFullyFilled(dimCardinality[dimCounter]);
-        dimCounter++;
-      } else {
-        // for row store
-        int totalSize = 0;
-        for (int j = 0; j < dimPartitioner[i]; j++) {
-          bitLength[dimCounter] = getIncrementedCardinality(dimCardinality[dimCounter]);
-          totalSize += bitLength[dimCounter];
-          dimCounter++;
-        }
-        // below code is to increment in such a way that row group will
-        // be stored
-        // as byte level
-        int mod = totalSize % 8;
-        if (mod > 0) {
-          bitLength[dimCounter - 1] = bitLength[dimCounter - 1] + (8 - mod);
-        }
-      }
-    }
-    return bitLength;
-  }
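
A worked example with hypothetical inputs (again assuming a 10% increment constant):

    // dimCardinality = {1000, 10, 10}, dimPartitioner = {1, 2}
    // column 0 (columnar): 1000 needs 10 bits -> byte-aligned to 16 bits
    // columns 1 and 2 (one row group): each 10 -> 11 -> 4 bits, group total 8,
    // 8 % 8 == 0, so no extra padding on the last column of the group
    // result: bitLength = {16, 4, 4}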
-
-  /**
-   * Below method will be used to get the value compression model of the
-   * measure data chunk
-   *
-   * @param measureDataChunkList
-   * @return value compression model
-   */
-  public static ValueCompressionModel getValueCompressionModel(
-      List<DataChunk> measureDataChunkList) {
-    Object[] maxValue = new Object[measureDataChunkList.size()];
-    Object[] minValue = new Object[measureDataChunkList.size()];
-    Object[] uniqueValue = new Object[measureDataChunkList.size()];
-    int[] decimal = new int[measureDataChunkList.size()];
-    char[] type = new char[measureDataChunkList.size()];
-    byte[] dataTypeSelected = new byte[measureDataChunkList.size()];
-
-    /**
-     * to fill the meta data required for value compression model
-     */
-    for (int i = 0; i < dataTypeSelected.length; i++) {
-      int indexOf = measureDataChunkList.get(i).getEncodingList().indexOf(Encoding.DELTA);
-      if (indexOf > -1) {
-        ValueEncoderMeta valueEncoderMeta =
-            measureDataChunkList.get(i).getValueEncoderMeta().get(indexOf);
-        maxValue[i] = valueEncoderMeta.getMaxValue();
-        minValue[i] = valueEncoderMeta.getMinValue();
-        uniqueValue[i] = valueEncoderMeta.getUniqueValue();
-        decimal[i] = valueEncoderMeta.getDecimal();
-        type[i] = valueEncoderMeta.getType();
-        dataTypeSelected[i] = valueEncoderMeta.getDataTypeSelected();
-      }
-    }
-    MeasureMetaDataModel measureMetadataModel =
-        new MeasureMetaDataModel(minValue, maxValue, decimal, dataTypeSelected.length, uniqueValue,
-            type, dataTypeSelected);
-    return ValueCompressionUtil.getValueCompressionModel(measureMetadataModel);
-  }
-
-  /**
-   * Below method will be used to check whether particular encoding is present
-   * in the dimension or not
-   *
-   * @param encodings encodings of the dimension
-   * @param encoding  encoding to search
-   * @return if encoding is present in dimension
-   */
-  public static boolean hasEncoding(List<Encoding> encodings, Encoding encoding) {
-    return encodings.contains(encoding);
-  }
-
-  /**
-   * below method is to check whether data type is present in the data type array
-   *
-   * @param dataType  data type to be searched
-   * @param dataTypes all data types
-   * @return if data type is present
-   */
-  public static boolean hasDataType(DataType dataType, DataType[] dataTypes) {
-    for (int i = 0; i < dataTypes.length; i++) {
-      if (dataType.equals(dataTypes[i])) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * below method is to check whether it is complex data type
-   *
-   * @param dataType  data type to be searched
-   * @return if data type is present
-   */
-  public static boolean hasComplexDataType(DataType dataType) {
-    switch (dataType) {
-      case ARRAY :
-      case STRUCT:
-      case MAP:
-        return true;
-      default:
-        return false;
-    }
-  }
-
-  public static boolean[] getDictionaryEncodingArray(QueryDimension[] queryDimensions) {
-    boolean[] dictionaryEncodingArray = new boolean[queryDimensions.length];
-    for (int i = 0; i < queryDimensions.length; i++) {
-      dictionaryEncodingArray[i] =
-          queryDimensions[i].getDimension().hasEncoding(Encoding.DICTIONARY);
-    }
-    return dictionaryEncodingArray;
-  }
-
-  public static boolean[] getDirectDictionaryEncodingArray(QueryDimension[] queryDimensions) {
-    boolean[] dictionaryEncodingArray = new boolean[queryDimensions.length];
-    for (int i = 0; i < queryDimensions.length; i++) {
-      dictionaryEncodingArray[i] =
-          queryDimensions[i].getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY);
-    }
-    return dictionaryEncodingArray;
-  }
-
-  public static boolean[] getComplexDataTypeArray(QueryDimension[] queryDimensions) {
-    boolean[] dictionaryEncodingArray = new boolean[queryDimensions.length];
-    for (int i = 0; i < queryDimensions.length; i++) {
-      dictionaryEncodingArray[i] =
-          CarbonUtil.hasComplexDataType(queryDimensions[i].getDimension().getDataType());
-    }
-    return dictionaryEncodingArray;
-  }
-
-  /**
-   * Below method will be used to read the data file metadata
-   *
-   * @param filePath file path
-   * @param blockOffset   offset in the file
-   * @return Data file metadata instance
-   * @throws CarbonUtilException
-   */
-  public static DataFileFooter readMetadatFile(String filePath, long blockOffset, long blockLength)
-      throws CarbonUtilException {
-    DataFileFooterConverter fileFooterConverter = new DataFileFooterConverter();
-    try {
-      return fileFooterConverter.readDataFileFooter(filePath, blockOffset, blockLength);
-    } catch (IOException e) {
-      throw new CarbonUtilException("Problem while reading the file metadata", e);
-    }
-  }
-
-  /**
-   * Below method will be used to get the surrogate key
-   *
-   * @param data   actual data
-   * @param buffer byte buffer which will be used to convert the data to integer value
-   * @return surrogate key
-   */
-  public static int getSurrogateKey(byte[] data, ByteBuffer buffer) {
-    int length = 4 - data.length;
-    for (int i = 0; i < length; i++) {
-      buffer.put((byte) 0);
-    }
-    buffer.put(data);
-    buffer.rewind();
-    int surrogate = buffer.getInt();
-    buffer.clear();
-    return surrogate;
-  }
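
For example, a two-byte stored key is left-padded to four bytes before being read back as an int:

    // {0x01, 0x2C} -> padded to {0x00, 0x00, 0x01, 0x2C} -> 0x0000012C = 300
    ByteBuffer buffer = ByteBuffer.allocate(4);
    int surrogate = getSurrogateKey(new byte[] {0x01, 0x2C}, buffer);  // 300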
-
-  /**
-   * Thread to delete the tables
-   *
-   */
-  private static final class DeleteFolderAndFiles implements Callable<Void> {
-    private CarbonFile file;
-
-    private DeleteFolderAndFiles(CarbonFile file) {
-      this.file = file;
-    }
-
-    @Override public Void call() throws Exception {
-      deleteFoldersAndFiles(file);
-      return null;
-    }
-
-  }
-
-  /**
-   * class to sort aggregate folder list in descending order
-   */
-  private static class AggTableComparator implements Comparator<String> {
-    public int compare(String aggTable1, String aggTable2) {
-      int index1 = aggTable1.lastIndexOf(CarbonCommonConstants.UNDERSCORE);
-      int index2 = aggTable2.lastIndexOf(CarbonCommonConstants.UNDERSCORE);
-      int n1 = Integer.parseInt(aggTable1.substring(index1 + 1));
-      int n2 = Integer.parseInt(aggTable2.substring(index2 + 1));
-      if (n1 > n2) {
-        return -1;
-      } else if (n1 < n2) {
-        return 1;
-      } else {
-        return 0;
-      }
-    }
-  }
-
-  /**
-   * Below method will be used to get the dimension
-   *
-   * @param tableDimensionList table dimension list
-   * @return boolean array specifying true if dimension is dictionary
-   * and false if dimension is not a dictionary column
-   */
-  public static boolean[] identifyDimensionType(List<CarbonDimension> tableDimensionList) {
-    List<Boolean> isDictionaryDimensions = new ArrayList<Boolean>();
-    Set<Integer> processedColumnGroup = new HashSet<Integer>();
-    for (CarbonDimension carbonDimension : tableDimensionList) {
-      List<CarbonDimension> childs = carbonDimension.getListOfChildDimensions();
-      // assuming complex dimensions will always be at the end
-      if(null != childs && childs.size() > 0) {
-        break;
-      }
-      if (carbonDimension.isColumnar() && hasEncoding(carbonDimension.getEncoder(),
-          Encoding.DICTIONARY)) {
-        isDictionaryDimensions.add(true);
-      } else if (!carbonDimension.isColumnar()) {
-        if (processedColumnGroup.add(carbonDimension.columnGroupId())) {
-          isDictionaryDimensions.add(true);
-        }
-      } else {
-        isDictionaryDimensions.add(false);
-      }
-    }
-    boolean[] primitive = ArrayUtils
-        .toPrimitive(isDictionaryDimensions.toArray(new Boolean[isDictionaryDimensions.size()]));
-    return primitive;
-  }
-
-  /**
-   * This method will form one single byte[] for all the high cardinality dims.
-   * First it will add all the offsets of the variable length byte[]s and then the
-   * actual values
-   *
-   * @param byteBufferArr
-   * @return byte[] key.
-   */
-  public static byte[] packByteBufferIntoSingleByteArray(ByteBuffer[] byteBufferArr) {
-    // an empty array means there is no no-dictionary data to pack.
-    if (null == byteBufferArr || byteBufferArr.length == 0) {
-      return null;
-    }
-    int noOfCol = byteBufferArr.length;
-    short offsetLen = (short) (noOfCol * 2);
-    int totalBytes = calculateTotalBytes(byteBufferArr) + offsetLen;
-    ByteBuffer buffer = ByteBuffer.allocate(totalBytes);
-    // writing the offset of the first element.
-    buffer.putShort(offsetLen);
-
-    // prepare index for byte []
-    for (int index = 0; index < byteBufferArr.length - 1; index++) {
-      ByteBuffer individualCol = byteBufferArr[index];
-      int noOfBytes = individualCol.capacity();
-      buffer.putShort((short) (offsetLen + noOfBytes));
-      offsetLen += noOfBytes;
-      individualCol.rewind();
-    }
-
-    // put actual data.
-    for (int index = 0; index < byteBufferArr.length; index++) {
-      ByteBuffer individualCol = byteBufferArr[index];
-      buffer.put(individualCol.array());
-    }
-
-    buffer.rewind();
-    return buffer.array();
-
-  }
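
A byte-layout sketch for two no-dictionary columns of 3 and 2 bytes, following the code above:

    // header: two shorts, so offsetLen = 4
    //   putShort(4)  -> column 0 starts at byte 4
    //   putShort(7)  -> column 1 starts at byte 7 (4 + 3)
    // body: 3 bytes of column 0, then 2 bytes of column 1
    // total: 4 + 3 + 2 = 9 bytes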
-
-  /**
-   * To calculate the total bytes in byte Buffer[].
-   *
-   * @param byteBufferArr
-   * @return
-   */
-  private static int calculateTotalBytes(ByteBuffer[] byteBufferArr) {
-    int total = 0;
-    for (int index = 0; index < byteBufferArr.length; index++) {
-      total += byteBufferArr[index].capacity();
-    }
-    return total;
-  }
-
-  /**
-   * Find the dimension from metadata by using its unique name. As of now we are
-   * taking the level name as the unique name, but the user needs to give one
-   * unique name for each level and mention that level in the query.
-   *
-   * @param dimensions
-   * @param carbonDim
-   * @return
-   */
-  public static CarbonDimension findDimension(List<CarbonDimension> dimensions, String carbonDim) {
-    CarbonDimension findDim = null;
-    for (CarbonDimension dimension : dimensions) {
-      if (dimension.getColName().equalsIgnoreCase(carbonDim)) {
-        findDim = dimension;
-        break;
-      }
-    }
-    return findDim;
-  }
-
-  /**
-   * This method will be used to clear the dictionary cache after its usage is complete
-   * so that if memory threshold is reached it can evicted from LRU cache
-   *
-   * @param dictionary
-   */
-  public static void clearDictionaryCache(Dictionary dictionary) {
-    if (null != dictionary) {
-      dictionary.clear();
-    }
-  }
-
-  /**
-   * convert from wrapper to external data type
-   *
-   * @param dataType
-   * @return
-   */
-  public static org.carbondata.format.DataType fromWrapperToExternalDataType(DataType dataType) {
-
-    if (null == dataType) {
-      return null;
-    }
-    switch (dataType) {
-      case STRING:
-        return org.carbondata.format.DataType.STRING;
-      case INT:
-        return org.carbondata.format.DataType.INT;
-      case LONG:
-        return org.carbondata.format.DataType.LONG;
-      case DOUBLE:
-        return org.carbondata.format.DataType.DOUBLE;
-      case DECIMAL:
-        return org.carbondata.format.DataType.DECIMAL;
-      case TIMESTAMP:
-        return org.carbondata.format.DataType.TIMESTAMP;
-      case ARRAY:
-        return org.carbondata.format.DataType.ARRAY;
-      case STRUCT:
-        return org.carbondata.format.DataType.STRUCT;
-      default:
-        return org.carbondata.format.DataType.STRING;
-    }
-  }
-
-  /**
-   * convert from external to wrapper data type
-   *
-   * @param dataType
-   * @return
-   */
-  public static DataType fromExternalToWrapperDataType(org.carbondata.format.DataType dataType) {
-    if (null == dataType) {
-      return null;
-    }
-    switch (dataType) {
-      case STRING:
-        return DataType.STRING;
-      case INT:
-        return DataType.INT;
-      case LONG:
-        return DataType.LONG;
-      case DOUBLE:
-        return DataType.DOUBLE;
-      case DECIMAL:
-        return DataType.DECIMAL;
-      case TIMESTAMP:
-        return DataType.TIMESTAMP;
-      case ARRAY:
-        return DataType.ARRAY;
-      case STRUCT:
-        return DataType.STRUCT;
-      default:
-        return DataType.STRING;
-    }
-  }
-  /**
-   * @param dictionaryColumnCardinality
-   * @param wrapperColumnSchemaList
-   * @return formatted cardinality, with -1 added for each no-dictionary column
-   */
-  public static int[] getFormattedCardinality(int[] dictionaryColumnCardinality,
-      List<ColumnSchema> wrapperColumnSchemaList) {
-    List<Integer> cardinality = new ArrayList<>();
-    int counter = 0;
-    for (int i = 0; i < wrapperColumnSchemaList.size(); i++) {
-      if (CarbonUtil.hasEncoding(wrapperColumnSchemaList.get(i).getEncodingList(),
-          org.carbondata.core.carbon.metadata.encoder.Encoding.DICTIONARY)) {
-        cardinality.add(dictionaryColumnCardinality[counter]);
-        counter++;
-      } else if (!wrapperColumnSchemaList.get(i).isDimensionColumn()) {
-        continue;
-      } else {
-        cardinality.add(-1);
-      }
-    }
-    return ArrayUtils.toPrimitive(cardinality.toArray(new Integer[cardinality.size()]));
-  }
-
-  public static List<ColumnSchema> getColumnSchemaList(List<CarbonDimension> carbonDimensionsList,
-      List<CarbonMeasure> carbonMeasureList) {
-    List<ColumnSchema> wrapperColumnSchemaList = new ArrayList<ColumnSchema>();
-    fillColumnSchemaListForComplexDims(carbonDimensionsList, wrapperColumnSchemaList);
-    for (CarbonMeasure carbonMeasure : carbonMeasureList) {
-      wrapperColumnSchemaList.add(carbonMeasure.getColumnSchema());
-    }
-    return wrapperColumnSchemaList;
-  }
-
-  private static void fillColumnSchemaListForComplexDims(
-      List<CarbonDimension> carbonDimensionsList, List<ColumnSchema> wrapperColumnSchemaList) {
-    for (CarbonDimension carbonDimension : carbonDimensionsList) {
-      wrapperColumnSchemaList.add(carbonDimension.getColumnSchema());
-      List<CarbonDimension> childDims = carbonDimension.getListOfChildDimensions();
-      if (null != childDims && childDims.size() > 0) {
-        fillColumnSchemaListForComplexDims(childDims, wrapperColumnSchemaList);
-      }
-    }
-  }
-  /**
-   * Below method will be used to get all the block index info from index file
-   *
-   * @param taskId                  task id of the file
-   * @param tableBlockInfoList      list of table block
-   * @param absoluteTableIdentifier absolute table identifier
-   * @return list of block info
-   * @throws CarbonUtilException if any problem while reading
-   */
-  public static List<DataFileFooter> readCarbonIndexFile(String taskId,
-      List<TableBlockInfo> tableBlockInfoList, AbsoluteTableIdentifier absoluteTableIdentifier)
-      throws CarbonUtilException {
-    // need to sort the block info list for the task in ascending order so
-    // it stays in sync with the block index read from the file
-    Collections.sort(tableBlockInfoList);
-    CarbonTablePath carbonTablePath = CarbonStorePath
-        .getCarbonTablePath(absoluteTableIdentifier.getStorePath(),
-            absoluteTableIdentifier.getCarbonTableIdentifier());
-    // getting the index file path
-    //TODO need to pass the proper partition number when partitioning is supported
-    String carbonIndexFilePath = carbonTablePath
-        .getCarbonIndexFilePath(taskId, "0", tableBlockInfoList.get(0).getSegmentId());
-    DataFileFooterConverter fileFooterConverter = new DataFileFooterConverter();
-    try {
-      // read the index info and return
-      return fileFooterConverter.getIndexInfo(carbonIndexFilePath, tableBlockInfoList);
-    } catch (IOException e) {
-      throw new CarbonUtilException("Problem while reading the file metadata", e);
-    }
-  }
-
-}
-
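
For reference, a minimal self-contained sketch of the offset-prefixed layout the packing logic above produces (the PackDemo class and its values are hypothetical, added only for illustration):

    import java.nio.ByteBuffer;

    // Packs two columns using the same layout as above: a header holding one
    // 2-byte start offset per column, followed by the raw column bytes.
    public final class PackDemo {
      public static void main(String[] args) {
        ByteBuffer[] cols = {
            ByteBuffer.wrap(new byte[] { 1, 2, 3 }),   // column 0: 3 bytes
            ByteBuffer.wrap(new byte[] { 4, 5 })       // column 1: 2 bytes
        };
        short offsetLen = (short) (cols.length * 2);   // header is 4 bytes
        int total = offsetLen;
        for (ByteBuffer col : cols) {
          total += col.capacity();
        }
        ByteBuffer packed = ByteBuffer.allocate(total);
        packed.putShort(offsetLen);                    // column 0 starts at 4
        for (int i = 0; i < cols.length - 1; i++) {
          offsetLen += cols[i].capacity();
          packed.putShort(offsetLen);                  // column 1 starts at 7
        }
        for (ByteBuffer col : cols) {
          packed.put(col.array());
        }
        // prints [0, 4, 0, 7, 1, 2, 3, 4, 5]: offsets 4 and 7, then the data
        System.out.println(java.util.Arrays.toString(packed.array()));
      }
    }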

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/CarbonUtilException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/CarbonUtilException.java b/core/src/main/java/org/carbondata/core/util/CarbonUtilException.java
deleted file mode 100644
index 9c54a07..0000000
--- a/core/src/main/java/org/carbondata/core/util/CarbonUtilException.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.util;
-
-import java.util.Locale;
-
-public class CarbonUtilException extends Exception {
-
-  /**
-   * default serial version ID.
-   */
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * The Error message.
-   */
-  private String msg = "";
-
-  /**
-   * Constructor
-   *
-   * @param msg The error message for this exception.
-   */
-  public CarbonUtilException(String msg) {
-    super(msg);
-    this.msg = msg;
-  }
-
-  /**
-   * Constructor
-   *
-   * @param msg The error message for this exception.
-   */
-  public CarbonUtilException(String msg, Throwable t) {
-    super(msg, t);
-    this.msg = msg;
-  }
-
-  /**
-   * This method is used to get the localized message.
-   *
-   * @param locale - A Locale object represents a specific geographical,
-   *               political, or cultural region.
-   * @return - Localized error message.
-   */
-  public String getLocalizedMessage(Locale locale) {
-    return "";
-  }
-
-  /**
-   * getLocalizedMessage
-   */
-  @Override public String getLocalizedMessage() {
-    return super.getLocalizedMessage();
-  }
-
-  /**
-   * getMessage
-   */
-  public String getMessage() {
-    return this.msg;
-  }
-}
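
For context, callers such as readCarbonIndexFile above wrapped checked IO failures in this exception so the query layer deals with a single checked type; a minimal sketch of that pattern (the IndexReadDemo class and its readIndex method are hypothetical):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    import org.carbondata.core.util.CarbonUtilException;

    // Wraps a low-level IOException in CarbonUtilException, preserving the cause.
    public final class IndexReadDemo {
      static byte[] readIndex(String path) throws CarbonUtilException {
        try {
          return Files.readAllBytes(Paths.get(path));
        } catch (IOException e) {
          throw new CarbonUtilException("Problem while reading the file metadata", e);
        }
      }
    }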

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/DataFileFooterConverter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/DataFileFooterConverter.java b/core/src/main/java/org/carbondata/core/util/DataFileFooterConverter.java
deleted file mode 100644
index cb28386..0000000
--- a/core/src/main/java/org/carbondata/core/util/DataFileFooterConverter.java
+++ /dev/null
@@ -1,467 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.util;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Iterator;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.datastore.block.TableBlockInfo;
-import org.carbondata.core.carbon.metadata.blocklet.BlockletInfo;
-import org.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
-import org.carbondata.core.carbon.metadata.blocklet.SegmentInfo;
-import org.carbondata.core.carbon.metadata.blocklet.compressor.ChunkCompressorMeta;
-import org.carbondata.core.carbon.metadata.blocklet.compressor.CompressionCodec;
-import org.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
-import org.carbondata.core.carbon.metadata.blocklet.datachunk.PresenceMeta;
-import org.carbondata.core.carbon.metadata.blocklet.index.BlockletBTreeIndex;
-import org.carbondata.core.carbon.metadata.blocklet.index.BlockletIndex;
-import org.carbondata.core.carbon.metadata.blocklet.index.BlockletMinMaxIndex;
-import org.carbondata.core.carbon.metadata.blocklet.sort.SortState;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-import org.carbondata.core.metadata.ValueEncoderMeta;
-import org.carbondata.core.reader.CarbonFooterReader;
-import org.carbondata.core.reader.CarbonIndexFileReader;
-import org.carbondata.format.BlockIndex;
-import org.carbondata.format.FileFooter;
-
-/**
- * Below class will be used to convert the thrift object of data file
- * meta data to wrapper object
- */
-public class DataFileFooterConverter {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(DataFileFooterConverter.class.getName());
-
-  /**
-   * Below method will be used to get the index info from index file
-   *
-   * @param filePath           file path of the index file
-   * @param tableBlockInfoList table block index
-   * @return list of index info
-   * @throws IOException problem while reading the index file
-   */
-  public List<DataFileFooter> getIndexInfo(String filePath, List<TableBlockInfo> tableBlockInfoList)
-      throws IOException {
-    CarbonIndexFileReader indexReader = new CarbonIndexFileReader();
-    List<DataFileFooter> dataFileFooters = new ArrayList<DataFileFooter>();
-    try {
-      // open the reader
-      indexReader.openThriftReader(filePath);
-      // get the index header
-      org.carbondata.format.IndexHeader readIndexHeader = indexReader.readIndexHeader();
-      List<ColumnSchema> columnSchemaList = new ArrayList<ColumnSchema>();
-      List<org.carbondata.format.ColumnSchema> table_columns = readIndexHeader.getTable_columns();
-      for (int i = 0; i < table_columns.size(); i++) {
-        columnSchemaList.add(thriftColumnSchemaToWrapperColumnSchema(table_columns.get(i)));
-      }
-      // get the segment info
-      SegmentInfo segmentInfo = getSegmentInfo(readIndexHeader.getSegment_info());
-      BlockletIndex blockletIndex = null;
-      int counter = 0;
-      DataFileFooter dataFileFooter = null;
-      // read the block info from file
-      while (indexReader.hasNext()) {
-        BlockIndex readBlockIndexInfo = indexReader.readBlockIndexInfo();
-        blockletIndex = getBlockletIndex(readBlockIndexInfo.getBlock_index());
-        dataFileFooter = new DataFileFooter();
-        dataFileFooter.setBlockletIndex(blockletIndex);
-        dataFileFooter.setColumnInTable(columnSchemaList);
-        dataFileFooter.setNumberOfRows(readBlockIndexInfo.getNum_rows());
-        dataFileFooter.setTableBlockInfo(tableBlockInfoList.get(counter++));
-        dataFileFooter.setSegmentInfo(segmentInfo);
-        dataFileFooters.add(dataFileFooter);
-      }
-    } finally {
-      indexReader.closeThriftReader();
-    }
-    return dataFileFooters;
-  }
-
-  /**
-   * Below method will be used to convert thrift file meta to wrapper file meta
-   */
-  public DataFileFooter readDataFileFooter(String filePath, long blockOffset, long blockLength)
-      throws IOException {
-    DataFileFooter dataFileFooter = new DataFileFooter();
-    FileHolder fileReader = null;
-    try {
-      long completeBlockLength = blockOffset + blockLength;
-      long footerPointer = completeBlockLength - 8;
-      fileReader = FileFactory.getFileHolder(FileFactory.getFileType(filePath));
-      long actualFooterOffset = fileReader.readLong(filePath, footerPointer);
-      CarbonFooterReader reader = new CarbonFooterReader(filePath, actualFooterOffset);
-      FileFooter footer = reader.readFooter();
-      dataFileFooter.setVersionId(footer.getVersion());
-      dataFileFooter.setNumberOfRows(footer.getNum_rows());
-      dataFileFooter.setSegmentInfo(getSegmentInfo(footer.getSegment_info()));
-      List<ColumnSchema> columnSchemaList = new ArrayList<ColumnSchema>();
-      List<org.carbondata.format.ColumnSchema> table_columns = footer.getTable_columns();
-      for (int i = 0; i < table_columns.size(); i++) {
-        columnSchemaList.add(thriftColumnSchemaToWrapperColumnSchema(table_columns.get(i)));
-      }
-      dataFileFooter.setColumnInTable(columnSchemaList);
-
-      List<org.carbondata.format.BlockletIndex> leaf_node_indices_Thrift =
-          footer.getBlocklet_index_list();
-      List<BlockletIndex> blockletIndexList = new ArrayList<BlockletIndex>();
-      for (int i = 0; i < leaf_node_indices_Thrift.size(); i++) {
-        BlockletIndex blockletIndex = getBlockletIndex(leaf_node_indices_Thrift.get(i));
-        blockletIndexList.add(blockletIndex);
-      }
-
-      List<org.carbondata.format.BlockletInfo> leaf_node_infos_Thrift =
-          footer.getBlocklet_info_list();
-      List<BlockletInfo> blockletInfoList = new ArrayList<BlockletInfo>();
-      for (int i = 0; i < leaf_node_infos_Thrift.size(); i++) {
-        BlockletInfo blockletInfo = getBlockletInfo(leaf_node_infos_Thrift.get(i));
-        blockletInfo.setBlockletIndex(blockletIndexList.get(i));
-        blockletInfoList.add(blockletInfo);
-      }
-      dataFileFooter.setBlockletList(blockletInfoList);
-      dataFileFooter.setBlockletIndex(getBlockletIndexForDataFileFooter(blockletIndexList));
-    } finally {
-      if (null != fileReader) {
-        fileReader.finish();
-      }
-    }
-    return dataFileFooter;
-  }
-
-  /**
-   * Below method will be used to get blocklet index for data file meta
-   *
-   * @param blockletIndexList
-   * @return blocklet index
-   */
-  private BlockletIndex getBlockletIndexForDataFileFooter(List<BlockletIndex> blockletIndexList) {
-    BlockletIndex blockletIndex = new BlockletIndex();
-    BlockletBTreeIndex blockletBTreeIndex = new BlockletBTreeIndex();
-    blockletBTreeIndex.setStartKey(blockletIndexList.get(0).getBtreeIndex().getStartKey());
-    blockletBTreeIndex
-        .setEndKey(blockletIndexList.get(blockletIndexList.size() - 1).getBtreeIndex().getEndKey());
-    blockletIndex.setBtreeIndex(blockletBTreeIndex);
-    byte[][] currentMinValue = blockletIndexList.get(0).getMinMaxIndex().getMinValues().clone();
-    byte[][] currentMaxValue = blockletIndexList.get(0).getMinMaxIndex().getMaxValues().clone();
-    byte[][] minValue = null;
-    byte[][] maxValue = null;
-    for (int i = 1; i < blockletIndexList.size(); i++) {
-      minValue = blockletIndexList.get(i).getMinMaxIndex().getMinValues();
-      maxValue = blockletIndexList.get(i).getMinMaxIndex().getMaxValues();
-      for (int j = 0; j < maxValue.length; j++) {
-        if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(currentMinValue[j], minValue[j]) > 0) {
-          currentMinValue[j] = minValue[j].clone();
-        }
-        if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(currentMaxValue[j], maxValue[j]) < 0) {
-          currentMaxValue[j] = maxValue[j].clone();
-        }
-      }
-    }
-
-    BlockletMinMaxIndex minMax = new BlockletMinMaxIndex();
-    minMax.setMaxValues(currentMaxValue);
-    minMax.setMinValues(currentMinValue);
-    blockletIndex.setMinMaxIndex(minMax);
-    return blockletIndex;
-  }
-
-  private ColumnSchema thriftColumnSchemaToWrapperColumnSchema(
-      org.carbondata.format.ColumnSchema externalColumnSchema) {
-    ColumnSchema wrapperColumnSchema = new ColumnSchema();
-    wrapperColumnSchema.setColumnUniqueId(externalColumnSchema.getColumn_id());
-    wrapperColumnSchema.setColumnName(externalColumnSchema.getColumn_name());
-    wrapperColumnSchema.setColumnar(externalColumnSchema.isColumnar());
-    wrapperColumnSchema
-        .setDataType(thriftDataTypeToWrapperDataType(externalColumnSchema.data_type));
-    wrapperColumnSchema.setDimensionColumn(externalColumnSchema.isDimension());
-    List<Encoding> encoders = new ArrayList<Encoding>();
-    for (org.carbondata.format.Encoding encoder : externalColumnSchema.getEncoders()) {
-      encoders.add(fromExternalToWrapperEncoding(encoder));
-    }
-    wrapperColumnSchema.setEncodingList(encoders);
-    wrapperColumnSchema.setNumberOfChild(externalColumnSchema.getNum_child());
-    wrapperColumnSchema.setPrecision(externalColumnSchema.getPrecision());
-    wrapperColumnSchema.setColumnGroup(externalColumnSchema.getColumn_group_id());
-    wrapperColumnSchema.setScale(externalColumnSchema.getScale());
-    wrapperColumnSchema.setDefaultValue(externalColumnSchema.getDefault_value());
-    wrapperColumnSchema.setAggregateFunction(externalColumnSchema.getAggregate_function());
-    return wrapperColumnSchema;
-  }
-
-  /**
-   * Below method converts the thrift blocklet info to the wrapper
-   * blocklet info
-   *
-   * @param blockletInfoThrift blocklet info of the thrift
-   * @return blocklet info wrapper
-   */
-  private BlockletInfo getBlockletInfo(org.carbondata.format.BlockletInfo blockletInfoThrift) {
-    BlockletInfo blockletInfo = new BlockletInfo();
-    List<DataChunk> dimensionColumnChunk = new ArrayList<DataChunk>();
-    List<DataChunk> measureChunk = new ArrayList<DataChunk>();
-    Iterator<org.carbondata.format.DataChunk> column_data_chunksIterator =
-        blockletInfoThrift.getColumn_data_chunksIterator();
-    if (null != column_data_chunksIterator) {
-      while (column_data_chunksIterator.hasNext()) {
-        org.carbondata.format.DataChunk next = column_data_chunksIterator.next();
-        if (next.isRowMajor()) {
-          dimensionColumnChunk.add(getDataChunk(next, false));
-        } else if (next.getEncoders().contains(org.carbondata.format.Encoding.DELTA)) {
-          measureChunk.add(getDataChunk(next, true));
-        } else {
-          dimensionColumnChunk.add(getDataChunk(next, false));
-        }
-      }
-    }
-    blockletInfo.setDimensionColumnChunk(dimensionColumnChunk);
-    blockletInfo.setMeasureColumnChunk(measureChunk);
-    blockletInfo.setNumberOfRows(blockletInfoThrift.getNum_rows());
-    return blockletInfo;
-  }
-
-  /**
-   * Below method converts the thrift encoding to the wrapper encoding
-   *
-   * @param encoderThrift thrift encoding
-   * @return wrapper encoding
-   */
-  private Encoding fromExternalToWrapperEncoding(org.carbondata.format.Encoding encoderThrift) {
-    switch (encoderThrift) {
-      case DICTIONARY:
-        return Encoding.DICTIONARY;
-      case DELTA:
-        return Encoding.DELTA;
-      case RLE:
-        return Encoding.RLE;
-      case INVERTED_INDEX:
-        return Encoding.INVERTED_INDEX;
-      case BIT_PACKED:
-        return Encoding.BIT_PACKED;
-      case DIRECT_DICTIONARY:
-        return Encoding.DIRECT_DICTIONARY;
-      default:
-        return Encoding.DICTIONARY;
-    }
-  }
-
-  /**
-   * Below method will be used to convert the thrift compression to wrapper
-   * compression codec
-   *
-   * @param compressionCodecThrift
-   * @return wrapper compression codec
-   */
-  private CompressionCodec getCompressionCodec(
-      org.carbondata.format.CompressionCodec compressionCodecThrift) {
-    switch (compressionCodecThrift) {
-      case SNAPPY:
-        return CompressionCodec.SNAPPY;
-      default:
-        return CompressionCodec.SNAPPY;
-    }
-  }
-
-  /**
-   * Below method will be used to convert thrift segment object to wrapper
-   * segment object
-   *
-   * @param segmentInfo thrift segment info object
-   * @return wrapper segment info object
-   */
-  private SegmentInfo getSegmentInfo(org.carbondata.format.SegmentInfo segmentInfo) {
-    SegmentInfo info = new SegmentInfo();
-    int[] cardinality = new int[segmentInfo.getColumn_cardinalities().size()];
-    for (int i = 0; i < cardinality.length; i++) {
-      cardinality[i] = segmentInfo.getColumn_cardinalities().get(i);
-    }
-    info.setColumnCardinality(cardinality);
-    info.setNumberOfColumns(segmentInfo.getNum_cols());
-    return info;
-  }
-
-  /**
-   * Below method will be used to convert the blocklet index of thrift to
-   * wrapper
-   *
-   * @param blockletIndexThrift
-   * @return blocklet index wrapper
-   */
-  private BlockletIndex getBlockletIndex(org.carbondata.format.BlockletIndex blockletIndexThrift) {
-    org.carbondata.format.BlockletBTreeIndex btreeIndex = blockletIndexThrift.getB_tree_index();
-    org.carbondata.format.BlockletMinMaxIndex minMaxIndex = blockletIndexThrift.getMin_max_index();
-    return new BlockletIndex(
-        new BlockletBTreeIndex(btreeIndex.getStart_key(), btreeIndex.getEnd_key()),
-        new BlockletMinMaxIndex(minMaxIndex.getMin_values(), minMaxIndex.getMax_values()));
-  }
-
-  /**
-   * Below method will be used to convert the thrift compression meta to
-   * wrapper chunk compression meta
-   *
-   * @param chunkCompressionMetaThrift
-   * @return chunkCompressionMetaWrapper
-   */
-  private ChunkCompressorMeta getChunkCompressionMeta(
-      org.carbondata.format.ChunkCompressionMeta chunkCompressionMetaThrift) {
-    ChunkCompressorMeta compressorMeta = new ChunkCompressorMeta();
-    compressorMeta
-        .setCompressor(getCompressionCodec(chunkCompressionMetaThrift.getCompression_codec()));
-    compressorMeta.setCompressedSize(chunkCompressionMetaThrift.getTotal_compressed_size());
-    compressorMeta.setUncompressedSize(chunkCompressionMetaThrift.getTotal_uncompressed_size());
-    return compressorMeta;
-  }
-
-  /**
-   * Below method will be used to convert the thrift data type to wrapper data
-   * type
-   *
-   * @param dataTypeThrift
-   * @return dataType wrapper
-   */
-  private DataType thriftDataTypeToWrapperDataType(org.carbondata.format.DataType dataTypeThrift) {
-    switch (dataTypeThrift) {
-      case STRING:
-        return DataType.STRING;
-      case SHORT:
-        return DataType.SHORT;
-      case INT:
-        return DataType.INT;
-      case LONG:
-        return DataType.LONG;
-      case DOUBLE:
-        return DataType.DOUBLE;
-      case DECIMAL:
-        return DataType.DECIMAL;
-      case TIMESTAMP:
-        return DataType.TIMESTAMP;
-      case ARRAY:
-        return DataType.ARRAY;
-      case STRUCT:
-        return DataType.STRUCT;
-      default:
-        return DataType.STRING;
-    }
-  }
-
-  /**
-   * Below method will be used to convert the thrift presence meta to wrapper
-   * presence meta
-   *
-   * @param presentMetadataThrift
-   * @return wrapper presence meta
-   */
-  private PresenceMeta getPresenceMeta(org.carbondata.format.PresenceMeta presentMetadataThrift) {
-    PresenceMeta presenceMeta = new PresenceMeta();
-    presenceMeta.setRepresentNullValues(presentMetadataThrift.isRepresents_presence());
-    presenceMeta.setBitSet(BitSet.valueOf(presentMetadataThrift.getPresent_bit_stream()));
-    return presenceMeta;
-  }
-
-  /**
-   * Below method will be used to convert the thrift object to wrapper object
-   *
-   * @param sortStateThrift
-   * @return wrapper sort state object
-   */
-  private SortState getSortState(org.carbondata.format.SortState sortStateThrift) {
-    if (sortStateThrift == org.carbondata.format.SortState.SORT_EXPLICIT) {
-      return SortState.SORT_EXPLICT;
-    } else if (sortStateThrift == org.carbondata.format.SortState.SORT_NATIVE) {
-      return SortState.SORT_NATIVE;
-    } else {
-      return SortState.SORT_NONE;
-    }
-  }
-
-  /**
-   * Below method will be used to convert the thrift data chunk to wrapper
-   * data chunk
-   *
-   * @param datachunkThrift
-   * @return wrapper data chunk
-   */
-  private DataChunk getDataChunk(org.carbondata.format.DataChunk datachunkThrift,
-      boolean isPresenceMetaPresent) {
-    DataChunk dataChunk = new DataChunk();
-    dataChunk.setColumnUniqueIdList(datachunkThrift.getColumn_ids());
-    dataChunk.setDataPageLength(datachunkThrift.getData_page_length());
-    dataChunk.setDataPageOffset(datachunkThrift.getData_page_offset());
-    if (isPresenceMetaPresent) {
-      dataChunk.setNullValueIndexForColumn(getPresenceMeta(datachunkThrift.getPresence()));
-    }
-    dataChunk.setRlePageLength(datachunkThrift.getRle_page_length());
-    dataChunk.setRlePageOffset(datachunkThrift.getRle_page_offset());
-    dataChunk.setRowMajor(datachunkThrift.isRowMajor());
-    dataChunk.setRowIdPageLength(datachunkThrift.getRowid_page_length());
-    dataChunk.setRowIdPageOffset(datachunkThrift.getRowid_page_offset());
-    dataChunk.setSortState(getSortState(datachunkThrift.getSort_state()));
-    dataChunk.setChunkCompressionMeta(getChunkCompressionMeta(datachunkThrift.getChunk_meta()));
-    List<Encoding> encodingList = new ArrayList<Encoding>(datachunkThrift.getEncoders().size());
-    for (int i = 0; i < datachunkThrift.getEncoders().size(); i++) {
-      encodingList.add(fromExternalToWrapperEncoding(datachunkThrift.getEncoders().get(i)));
-    }
-    dataChunk.setEncoderList(encodingList);
-    if (encodingList.contains(Encoding.DELTA)) {
-      List<ByteBuffer> thriftEncoderMeta = datachunkThrift.getEncoder_meta();
-      List<ValueEncoderMeta> encodeMetaList =
-          new ArrayList<ValueEncoderMeta>(thriftEncoderMeta.size());
-      for (int i = 0; i < thriftEncoderMeta.size(); i++) {
-        encodeMetaList.add(deserializeEncoderMeta(thriftEncoderMeta.get(i).array()));
-      }
-      dataChunk.setValueEncoderMeta(encodeMetaList);
-    }
-    return dataChunk;
-  }
-
-  /**
-   * Below method will be used to convert the encode metadata to
-   * ValueEncoderMeta object
-   *
-   * @param encoderMeta
-   * @return ValueEncoderMeta object
-   */
-  private ValueEncoderMeta deserializeEncoderMeta(byte[] encoderMeta) {
-    // TODO : should remove the unnecessary fields.
-    ByteArrayInputStream aos = null;
-    ObjectInputStream objStream = null;
-    ValueEncoderMeta meta = null;
-    try {
-      aos = new ByteArrayInputStream(encoderMeta);
-      objStream = new ObjectInputStream(aos);
-      meta = (ValueEncoderMeta) objStream.readObject();
-    } catch (ClassNotFoundException e) {
-      LOGGER.error(e);
-    } catch (IOException e) {
-      LOGGER.error(e);
-    } finally {
-      // close the stream in every case, not only on read failure
-      CarbonUtil.closeStreams(objStream);
-    }
-    return meta;
-  }
-}
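
The block-level index above is built by folding each blocklet's min/max column by column; a compact standalone sketch of that fold, substituting a plain unsigned lexicographic byte comparison for ByteUtil.UnsafeComparer (an assumption made for illustration):

    import java.util.Arrays;
    import java.util.List;

    // Folds per-blocklet minimum values into block-level minimums, column by column.
    public final class MinMaxMergeDemo {

      // Simplified stand-in for ByteUtil.UnsafeComparer: unsigned lexicographic order.
      static int compare(byte[] a, byte[] b) {
        int len = Math.min(a.length, b.length);
        for (int i = 0; i < len; i++) {
          int diff = (a[i] & 0xFF) - (b[i] & 0xFF);
          if (diff != 0) {
            return diff;
          }
        }
        return a.length - b.length;
      }

      static byte[][] mergeMin(List<byte[][]> blockletMins) {
        byte[][] current = blockletMins.get(0).clone();
        for (int i = 1; i < blockletMins.size(); i++) {
          byte[][] candidate = blockletMins.get(i);
          for (int col = 0; col < current.length; col++) {
            if (compare(current[col], candidate[col]) > 0) {
              current[col] = candidate[col].clone();  // keep the smaller value
            }
          }
        }
        return current;
      }

      public static void main(String[] args) {
        byte[][] merged = mergeMin(Arrays.asList(
            new byte[][] { { 5 }, { 9 } },
            new byte[][] { { 3 }, { 12 } }));
        System.out.println(Arrays.deepToString(merged)); // [[3], [9]]
      }
    }

The maximum values are folded the same way with the comparison reversed.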

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/DataTypeUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/DataTypeUtil.java b/core/src/main/java/org/carbondata/core/util/DataTypeUtil.java
deleted file mode 100644
index 92316d5..0000000
--- a/core/src/main/java/org/carbondata/core/util/DataTypeUtil.java
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.util;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.math.RoundingMode;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-import org.apache.commons.lang.NumberUtils;
-import org.apache.spark.unsafe.types.UTF8String;
-
-public final class DataTypeUtil {
-
-  /**
-   * LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(DataTypeUtil.class.getName());
-
-  /**
-   * This method will convert a given value to its specific type
-   *
-   * @param msrValue
-   * @param dataType
-   * @param carbonMeasure
-   * @return
-   */
-  public static Object getMeasureValueBasedOnDataType(String msrValue, DataType dataType,
-      CarbonMeasure carbonMeasure) {
-    switch (dataType) {
-      case DECIMAL:
-        BigDecimal bigDecimal =
-            new BigDecimal(msrValue).setScale(carbonMeasure.getScale(), RoundingMode.HALF_UP);
-        return normalizeDecimalValue(bigDecimal, carbonMeasure.getPrecision());
-      case INT:
-        return Double.valueOf(msrValue).longValue();
-      case LONG:
-        return Long.valueOf(msrValue);
-      default:
-        return Double.valueOf(msrValue);
-    }
-  }
-
-  /**
-   * This method will check the digits before dot with the max precision allowed
-   *
-   * @param bigDecimal
-   * @param allowedPrecision precision configured by the user
-   * @return
-   */
-  private static BigDecimal normalizeDecimalValue(BigDecimal bigDecimal, int allowedPrecision) {
-    if (bigDecimal.precision() > allowedPrecision) {
-      return null;
-    }
-    return bigDecimal;
-  }
-
-  /**
-   * This method will return the type of measure based on its data type
-   *
-   * @param dataType
-   * @return
-   */
-  public static char getAggType(DataType dataType) {
-    switch (dataType) {
-      case DECIMAL:
-        return CarbonCommonConstants.BIG_DECIMAL_MEASURE;
-      case INT:
-      case LONG:
-        return CarbonCommonConstants.BIG_INT_MEASURE;
-      default:
-        return CarbonCommonConstants.SUM_COUNT_VALUE_MEASURE;
-    }
-  }
-
-  /**
-   * This method will convert a big decimal value to bytes
-   *
-   * @param num
-   * @return
-   */
-  public static byte[] bigDecimalToByte(BigDecimal num) {
-    BigInteger sig = new BigInteger(num.unscaledValue().toString());
-    int scale = num.scale();
-    byte[] bscale = new byte[] { (byte) (scale) };
-    byte[] buff = sig.toByteArray();
-    byte[] completeArr = new byte[buff.length + bscale.length];
-    System.arraycopy(bscale, 0, completeArr, 0, bscale.length);
-    System.arraycopy(buff, 0, completeArr, bscale.length, buff.length);
-    return completeArr;
-  }
-
-  /**
-   * This method will convert a byte value back to big decimal value
-   *
-   * @param raw
-   * @return
-   */
-  public static BigDecimal byteToBigDecimal(byte[] raw) {
-    int scale = (raw[0] & 0xFF);
-    byte[] unscale = new byte[raw.length - 1];
-    System.arraycopy(raw, 1, unscale, 0, unscale.length);
-    BigInteger sig = new BigInteger(unscale);
-    return new BigDecimal(sig, scale);
-  }
-
-  /**
-   * returns the SqlStatement.Type of corresponding string value
-   *
-   * @param dataTypeStr
-   * @return return the SqlStatement.Type
-   */
-  public static DataType getDataType(String dataTypeStr) {
-    DataType dataType = null;
-    switch (dataTypeStr) {
-      case "TIMESTAMP":
-        dataType = DataType.TIMESTAMP;
-        break;
-      case "STRING":
-        dataType = DataType.STRING;
-        break;
-      case "INT":
-        dataType = DataType.INT;
-        break;
-      case "SHORT":
-        dataType = DataType.SHORT;
-        break;
-      case "LONG":
-        dataType = DataType.LONG;
-        break;
-      case "DOUBLE":
-        dataType = DataType.DOUBLE;
-        break;
-      case "DECIMAL":
-        dataType = DataType.DECIMAL;
-        break;
-      case "ARRAY":
-        dataType = DataType.ARRAY;
-        break;
-      case "STRUCT":
-        dataType = DataType.STRUCT;
-        break;
-      case "MAP":
-      default:
-        dataType = DataType.STRING;
-    }
-    return dataType;
-  }
-
-  /**
-   * Below method checks whether the input data is a valid string for the
-   * given data type. Returns false if the string cannot be parsed.
-   */
-  public static boolean isValidData(String data, DataType actualDataType) {
-    if (null == data) {
-      return false;
-    }
-    try {
-      switch (actualDataType) {
-        case SHORT:
-        case INT:
-        case LONG:
-        case DOUBLE:
-        case DECIMAL:
-          return NumberUtils.isNumber(data);
-        case TIMESTAMP:
-          if (data.isEmpty()) {
-            return false;
-          }
-          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
-              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
-          try {
-            parser.parse(data);
-            return true;
-          } catch (ParseException e) {
-            return false;
-          }
-        default:
-          return true;
-      }
-    } catch (NumberFormatException ex) {
-      return false;
-    }
-  }
-
-  /**
-   * Below method will be used to convert the data passed to its actual data
-   * type
-   *
-   * @param data           data
-   * @param actualDataType actual data type
-   * @return actual data after conversion
-   */
-  public static Object getDataBasedOnDataType(String data, DataType actualDataType) {
-
-    if (null == data || CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(data)) {
-      return null;
-    }
-    try {
-      switch (actualDataType) {
-        case INT:
-          if (data.isEmpty()) {
-            return null;
-          }
-          return Integer.parseInt(data);
-        case SHORT:
-          if (data.isEmpty()) {
-            return null;
-          }
-          return Short.parseShort(data);
-        case DOUBLE:
-          if (data.isEmpty()) {
-            return null;
-          }
-          return Double.parseDouble(data);
-        case LONG:
-          if (data.isEmpty()) {
-            return null;
-          }
-          return Long.parseLong(data);
-        case TIMESTAMP:
-          if (data.isEmpty()) {
-            return null;
-          }
-          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
-              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
-          Date dateToStr = null;
-          try {
-            dateToStr = parser.parse(data);
-            return dateToStr.getTime() * 1000;
-          } catch (ParseException e) {
-            LOGGER.error("Cannot convert" + data + " to Time/Long type value" + e.getMessage());
-            return null;
-          }
-        case DECIMAL:
-          if (data.isEmpty()) {
-            return null;
-          }
-          java.math.BigDecimal javaDecVal = new java.math.BigDecimal(data);
-          scala.math.BigDecimal scalaDecVal = new scala.math.BigDecimal(javaDecVal);
-          org.apache.spark.sql.types.Decimal decConverter =
-              new org.apache.spark.sql.types.Decimal();
-          return decConverter.set(scalaDecVal);
-        default:
-          return UTF8String.fromString(data);
-      }
-    } catch (NumberFormatException ex) {
-      LOGGER.error("Problem while converting data type" + data);
-      return null;
-    }
-
-  }
-
-  public static Object getMeasureDataBasedOnDataType(Object data, DataType dataType) {
-
-    if (null == data) {
-      return null;
-    }
-    try {
-      switch (dataType) {
-        case DOUBLE:
-          return data;
-        case LONG:
-          return data;
-        case DECIMAL:
-          java.math.BigDecimal javaDecVal = new java.math.BigDecimal(data.toString());
-          scala.math.BigDecimal scalaDecVal = new scala.math.BigDecimal(javaDecVal);
-          org.apache.spark.sql.types.Decimal decConverter =
-              new org.apache.spark.sql.types.Decimal();
-          return decConverter.set(scalaDecVal);
-        default:
-          return data;
-      }
-    } catch (NumberFormatException ex) {
-      LOGGER.error("Problem while converting data type" + data);
-      return null;
-    }
-
-  }
-
-  /**
-   * Below method checks whether the data is parseable. If it is not,
-   * null is returned so that the system can fall back to the default
-   * null member value.
-   *
-   * @param data           data
-   * @param actualDataType actual data type
-   * @return actual data after conversion
-   */
-  public static Object normalizeIntAndLongValues(String data, DataType actualDataType) {
-    if (null == data) {
-      return null;
-    }
-    try {
-      Object parsedValue = null;
-      switch (actualDataType) {
-        case INT:
-          parsedValue = Integer.parseInt(data);
-          break;
-        case LONG:
-          parsedValue = Long.parseLong(data);
-          break;
-        default:
-          return data;
-      }
-      if (null != parsedValue) {
-        return data;
-      }
-      return null;
-    } catch (NumberFormatException ex) {
-      return null;
-    }
-  }
-
-  /**
-   * This method will parse a given string value corresponding to its data type
-   *
-   * @param value     value to parse
-   * @param dimension dimension to get data type and precision and scale in case of decimal
-   *                  data type
-   * @return
-   */
-  public static String normalizeColumnValueForItsDataType(String value, CarbonDimension dimension) {
-    try {
-      Object parsedValue = null;
-      // validation will not be done for timestamp datatype as for timestamp direct dictionary
-      // is generated. No dictionary file is created for timestamp datatype column
-      switch (dimension.getDataType()) {
-        case DECIMAL:
-          return parseStringToBigDecimal(value, dimension);
-        case INT:
-        case LONG:
-          parsedValue = normalizeIntAndLongValues(value, dimension.getDataType());
-          break;
-        case DOUBLE:
-          parsedValue = Double.parseDouble(value);
-          break;
-        default:
-          return value;
-      }
-      if (null != parsedValue) {
-        return value;
-      }
-      return null;
-    } catch (Exception e) {
-      return null;
-    }
-  }
-
-  /**
-   * This method will parse a value to its datatype if datatype is decimal else will return
-   * the value passed
-   *
-   * @param value     value to be parsed
-   * @param dimension
-   * @return
-   */
-  public static String parseValue(String value, CarbonDimension dimension) {
-    try {
-      switch (dimension.getDataType()) {
-        case DECIMAL:
-          return parseStringToBigDecimal(value, dimension);
-        default:
-          return value;
-      }
-    } catch (Exception e) {
-      return null;
-    }
-  }
-
-  private static String parseStringToBigDecimal(String value, CarbonDimension dimension) {
-    BigDecimal bigDecimal = new BigDecimal(value)
-        .setScale(dimension.getColumnSchema().getScale(), RoundingMode.HALF_UP);
-    BigDecimal normalizedValue =
-        normalizeDecimalValue(bigDecimal, dimension.getColumnSchema().getPrecision());
-    if (null != normalizedValue) {
-      return normalizedValue.toString();
-    }
-    return null;
-  }
-}
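
The decimal encoding above stores a one-byte scale followed by the unscaled value's two's-complement bytes; a small standalone round-trip sketch mirroring bigDecimalToByte/byteToBigDecimal (the DecimalCodecDemo class is hypothetical and, like the original, assumes the scale fits in a single byte):

    import java.math.BigDecimal;
    import java.math.BigInteger;

    // Round-trips a BigDecimal through the [scale byte | unscaled bytes] layout.
    public final class DecimalCodecDemo {
      static byte[] encode(BigDecimal num) {
        byte[] unscaled = num.unscaledValue().toByteArray();
        byte[] out = new byte[unscaled.length + 1];
        out[0] = (byte) num.scale();              // scale stored in the first byte
        System.arraycopy(unscaled, 0, out, 1, unscaled.length);
        return out;
      }

      static BigDecimal decode(byte[] raw) {
        byte[] unscaled = new byte[raw.length - 1];
        System.arraycopy(raw, 1, unscaled, 0, unscaled.length);
        return new BigDecimal(new BigInteger(unscaled), raw[0] & 0xFF);
      }

      public static void main(String[] args) {
        System.out.println(decode(encode(new BigDecimal("123.45")))); // 123.45
      }
    }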


[50/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/apache/carbondata/common/logging/impl/AuditLevel.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/carbondata/common/logging/impl/AuditLevel.java b/common/src/main/java/org/apache/carbondata/common/logging/impl/AuditLevel.java
new file mode 100644
index 0000000..cd0d7f5
--- /dev/null
+++ b/common/src/main/java/org/apache/carbondata/common/logging/impl/AuditLevel.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging.impl;
+
+import org.apache.log4j.Level;
+
+public class AuditLevel extends Level {
+
+  public static final AuditLevel AUDIT = new AuditLevel(55000, "AUDIT", 0);
+  private static final long serialVersionUID = -209614723183147373L;
+
+  /**
+   * Constructor
+   *
+   * @param level            log level
+   * @param levelStr         log level string
+   * @param syslogEquivalent syslogEquivalent
+   */
+  protected AuditLevel(int level, String levelStr, int syslogEquivalent) {
+    super(level, levelStr, syslogEquivalent);
+  }
+
+  /**
+   * Always returns the custom AUDIT level, regardless of the supplied value
+   *
+   * @param val          value
+   * @param defaultLevel level
+   * @return custom level
+   */
+  public static AuditLevel toLevel(int val, Level defaultLevel) {
+    return AUDIT;
+  }
+
+  /**
+   * Always returns the custom AUDIT level, regardless of the supplied string
+   *
+   * @param sArg         sArg
+   * @param defaultLevel level
+   * @return custom level
+   */
+  public static AuditLevel toLevel(String sArg, Level defaultLevel) {
+    return AUDIT;
+  }
+}
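
A custom log4j level is used by passing it to Logger.log; a minimal sketch of emitting an audit record at this level (the logger name is illustrative):

    import org.apache.carbondata.common.logging.impl.AuditLevel;

    import org.apache.log4j.Logger;

    // AUDIT (55000) sits above FATAL (50000), so only appenders whose threshold
    // accepts AUDIT will receive this event.
    public final class AuditDemo {
      public static void main(String[] args) {
        Logger logger = Logger.getLogger("carbon.audit");
        logger.log(AuditLevel.AUDIT, "table created by user x");
      }
    }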

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/apache/carbondata/common/logging/impl/ExtendedRollingFileAppender.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/carbondata/common/logging/impl/ExtendedRollingFileAppender.java b/common/src/main/java/org/apache/carbondata/common/logging/impl/ExtendedRollingFileAppender.java
new file mode 100644
index 0000000..85f5ff9
--- /dev/null
+++ b/common/src/main/java/org/apache/carbondata/common/logging/impl/ExtendedRollingFileAppender.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging.impl;
+
+import java.io.File;
+import java.io.FileFilter;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.Locale;
+import java.util.Map.Entry;
+import java.util.TreeMap;
+
+import org.apache.log4j.Level;
+import org.apache.log4j.RollingFileAppender;
+import org.apache.log4j.helpers.CountingQuietWriter;
+import org.apache.log4j.helpers.LogLog;
+import org.apache.log4j.spi.LoggingEvent;
+
+/**
+ * Copied from log4j to remove the hard-coded file name, and modified to
+ * rename rolled-over files with a timestamp
+ */
+public class ExtendedRollingFileAppender extends RollingFileAppender {
+
+  private static final String DATE_FORMAT_FOR_TRANSFER = "yyyy-MM-dd'_'HH-mm-ss";
+  protected int currentLevel = Level.FATAL_INT;
+  /**
+   * Added for DTS2011122001074: a log message is now printed when the log
+   * file is rolled (after the file size exceeds the threshold) and when old
+   * files are deleted (after the file count exceeds the backup threshold)
+   */
+
+  private long nextRollover = 0;
+  private boolean cleanupInProgress = false;
+
+  /**
+   * Total number of files at any point of time should be Backup number of
+   * files + current file
+   */
+  private static void cleanLogs(final String startName, final String folderPath,
+      int maxBackupIndex) {
+    final String fileStartName = startName.toLowerCase(Locale.US);
+    // Delete the oldest file, to keep Windows happy.
+    File file = new File(folderPath);
+
+    if (file.exists()) {
+      File[] files = file.listFiles(new FileFilter() {
+
+        public boolean accept(File file) {
+          return !file.isDirectory() && file.getName().toLowerCase(Locale.US)
+              .startsWith(fileStartName);
+        }
+      });
+
+      int backupFiles = files.length - 1;
+
+      if (backupFiles <= maxBackupIndex) {
+        return;
+      }
+
+      // Sort the file based on its name.
+      TreeMap<String, File> sortedMap = new TreeMap<String, File>();
+      for (File file1 : files) {
+        sortedMap.put(file1.getName(), file1);
+      }
+
+      // Remove the first log file from the map; it will be <startName>.log
+      // itself which will be backed up in rollover
+      sortedMap.remove(sortedMap.firstKey());
+
+      Iterator<Entry<String, File>> it = sortedMap.entrySet().iterator();
+      Entry<String, File> temp = null;
+
+      // After cleanup, maxBackupIndex - 1 files should remain, because one
+      // more backup file will be created after this method call returns
+      while (it.hasNext() && backupFiles > maxBackupIndex) {
+        temp = it.next();
+        File deleteFile = temp.getValue();
+        // Delete the file
+        // Fixed defect DTS2011122001074: after deletion of a log file, a
+        // log message is printed in ReportService.log
+        if (deleteFile.delete()) {
+          backupFiles--;
+        } else {
+          LogLog.error("Couldn't delete file :: " + deleteFile.getPath());
+        }
+      }
+    }
+  }
+
+  /**
+   * Copied from log4j to remove hardcoding of file name
+   */
+  public void rollOver() {
+    File target;
+    File file = new File(fileName);
+
+    String fileStartName = file.getName();
+    int dotIndex = fileStartName.indexOf('.');
+
+    if (dotIndex != -1) {
+      fileStartName = fileStartName.substring(0, dotIndex);
+    }
+    final String startName = fileStartName;
+    final String folderPath = file.getParent();
+
+    if (qw != null) {
+      long size = ((CountingQuietWriter) qw).getCount();
+      LogLog.debug("rolling over count=" + size);
+      // if operation fails, do not roll again until
+      // maxFileSize more bytes are written
+      nextRollover = size + maxFileSize;
+    }
+
+    LogLog.debug("maxBackupIndex=" + maxBackupIndex);
+
+    boolean renameSucceeded = true;
+
+    // If maxBackups <= 0, then there is no file renaming to be done.
+    if (maxBackupIndex > 0) {
+      DateFormat dateFormat = new SimpleDateFormat(DATE_FORMAT_FOR_TRANSFER);
+
+      StringBuffer buffer = new StringBuffer();
+      String extension = "";
+      if (fileName.contains(".")) {
+        extension = fileName.substring(fileName.lastIndexOf("."));
+        buffer.append(fileName.substring(0, fileName.lastIndexOf(".")));
+      } else {
+        buffer.append(fileName);
+      }
+      buffer.append("_").append(dateFormat.format(new Date())).append(extension);
+      // Rename fileName to fileName.1
+      target = new File(buffer.toString());
+
+      this.closeFile(); // keep windows happy.
+
+      LogLog.debug("Renaming file " + file + " to " + target);
+      renameSucceeded = file.renameTo(target);
+
+      //
+      // if file rename failed, reopen file with append = true
+      //
+      if (!renameSucceeded) {
+        try {
+          this.setFile(fileName, true, bufferedIO, bufferSize);
+        } catch (InterruptedIOException e) {
+          Thread.currentThread().interrupt();
+        } catch (IOException e) {
+          LogLog.error("setFile(" + fileName + ", true) call failed.", e);
+        }
+      }
+    }
+
+    //
+    // if all renames were successful, then
+    //
+    if (renameSucceeded) {
+      try {
+        // This will also close the file. This is OK since multiple
+        // close operations are safe.
+        this.setFile(fileName, false, bufferedIO, bufferSize);
+        nextRollover = 0;
+      } catch (InterruptedIOException e) {
+        Thread.currentThread().interrupt();
+      } catch (IOException e) {
+        LogLog.error("setFile(" + fileName + ", false) call failed.", e);
+      }
+    }
+
+    // Do clean up finally
+    cleanUpLogs(startName, folderPath);
+  }
+
+  private void cleanUpLogs(final String startName, final String folderPath) {
+    if (maxBackupIndex > 0) {
+      // Clean the logs files
+      Runnable r = new Runnable() {
+
+        public void run() {
+          if (cleanupInProgress) {
+            return;
+          }
+          synchronized (ExtendedRollingFileAppender.class) {
+            cleanupInProgress = true;
+            try {
+              cleanLogs(startName, folderPath, maxBackupIndex);
+            } catch (Throwable e) {
+              // ignore any error
+              LogLog.error("Cleaning logs failed", e);
+            } finally {
+              cleanupInProgress = false;
+            }
+          }
+        }
+      };
+
+      Thread t = new Thread(r);
+      t.start();
+    }
+  }
+
+  protected void subAppend(LoggingEvent event) {
+    if (event.getLevel().toInt() <= currentLevel) {
+      super.subAppend(event);
+      if (fileName != null && qw != null) {
+        long size = ((CountingQuietWriter) qw).getCount();
+        if (size >= maxFileSize && size >= nextRollover) {
+          rollOver();
+        }
+      }
+    }
+  }
+
+}
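
Like any log4j appender, this one is wired up through properties; a sketch of a configuration that exercises the size-based rollover and timestamped backups, in the same programmatic style StandardLogService uses below (file path and sizes are illustrative):

    import java.util.Properties;

    import org.apache.log4j.PropertyConfigurator;

    // Configures the custom appender: roll at 10MB, keep 5 timestamped backups.
    public final class AppenderConfigDemo {
      public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("log4j.rootLogger", "INFO,ROLL");
        props.setProperty("log4j.appender.ROLL",
            "org.apache.carbondata.common.logging.impl.ExtendedRollingFileAppender");
        props.setProperty("log4j.appender.ROLL.File", "logs/Carbon.log");
        props.setProperty("log4j.appender.ROLL.MaxFileSize", "10MB");
        props.setProperty("log4j.appender.ROLL.MaxBackupIndex", "5");
        props.setProperty("log4j.appender.ROLL.layout", "org.apache.log4j.PatternLayout");
        props.setProperty("log4j.appender.ROLL.layout.ConversionPattern",
            "%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n");
        PropertyConfigurator.configure(props);
      }
    }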

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/apache/carbondata/common/logging/impl/FileUtil.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/carbondata/common/logging/impl/FileUtil.java b/common/src/main/java/org/apache/carbondata/common/logging/impl/FileUtil.java
new file mode 100644
index 0000000..f5fb50e
--- /dev/null
+++ b/common/src/main/java/org/apache/carbondata/common/logging/impl/FileUtil.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging.impl;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Properties;
+
+import org.apache.log4j.Logger;
+
+/**
+ * Provides file Utility
+ */
+public final class FileUtil {
+
+  public static final String CARBON_PROPERTIES_FILE_PATH = "../../../conf/carbon.properties";
+  private static final Logger LOG = Logger.getLogger(FileUtil.class.getName());
+  private static Properties carbonProperties;
+
+  private FileUtil() {
+
+  }
+
+  public static Properties getCarbonProperties() {
+    if (null == carbonProperties) {
+      loadProperties();
+    }
+
+    return carbonProperties;
+  }
+
+  /**
+   * closes the stream
+   *
+   * @param stream stream to be closed.
+   */
+  public static void close(Closeable stream) {
+    if (null != stream) {
+      try {
+        stream.close();
+      } catch (IOException e) {
+        LOG.error("Exception while closing the Log stream");
+      }
+    }
+  }
+
+  private static void loadProperties() {
+    String property = System.getProperty("carbon.properties.filepath");
+    if (null == property) {
+      property = CARBON_PROPERTIES_FILE_PATH;
+    }
+    File file = new File(property);
+
+    FileInputStream fis = null;
+    try {
+      if (file.exists()) {
+        fis = new FileInputStream(file);
+
+        carbonProperties = new Properties();
+        carbonProperties.load(fis);
+      }
+    } catch (FileNotFoundException e) {
+      LOG.error("Could not find carbon properties file in the path " + property);
+    } catch (IOException e) {
+      LOG.error("Error while reading carbon properties file in the path " + property);
+    } finally {
+      if (null != fis) {
+        try {
+          fis.close();
+        } catch (IOException e) {
+          LOG.error("Error while closing the file stream for carbon.properties");
+        }
+      }
+    }
+  }
+}
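
A short sketch of how the loader above is typically consumed, including the system property it honors (the PropertiesDemo class and the property key read at the end are illustrative):

    import java.util.Properties;

    import org.apache.carbondata.common.logging.impl.FileUtil;

    // Reads carbon.properties; the location can be overridden on the JVM command
    // line with -Dcarbon.properties.filepath=/etc/carbon/carbon.properties.
    public final class PropertiesDemo {
      public static void main(String[] args) {
        Properties props = FileUtil.getCarbonProperties();
        // getCarbonProperties() returns null when the file cannot be found
        if (null != props) {
          System.out.println(props.getProperty("carbon.logging.level", "INFO"));
        }
      }
    }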

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/apache/carbondata/common/logging/impl/StandardLogService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/carbondata/common/logging/impl/StandardLogService.java b/common/src/main/java/org/apache/carbondata/common/logging/impl/StandardLogService.java
new file mode 100644
index 0000000..1ad71db
--- /dev/null
+++ b/common/src/main/java/org/apache/carbondata/common/logging/impl/StandardLogService.java
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging.impl;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Properties;
+
+import org.apache.carbondata.common.logging.LogService;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.log4j.Logger;
+import org.apache.log4j.MDC;
+
+/**
+ * Default Implementation of the <code>LogService</code>
+ */
+public final class StandardLogService implements LogService {
+
+  private static final String PARTITION_ID = "[partitionID:";
+  private static final String CARBON_AUDIT_LOG_PATH = "carbon.auditlog.file.path";
+  private static final String AUDIT_LOG_DEFAULT_PATH = "logs/CarbonAudit.log";
+  private static final String CARBON_AUDIT_LOG_ROLLING_UP_SIZE = "carbon.auditlog.max.file.size";
+  private static final String AUDIT_LOG_DEFAULT_ROLLING_UP_SIZE = "10MB";
+  private static final String CARBON_AUDIT_LOG_MAX_BACKUP = "carbon.auditlog.max.backup.files";
+  private static final String AUDIT_LOG_DEFAULT_MAX_BACKUP = "10";
+  private static final String CARBON_AUDIT_LOG_LEVEL = "carbon.logging.level";
+  private static final String AUDIT_LOG_DEFAULT_LEVEL = "INFO";
+  private static boolean doLog = true;
+  private Logger logger;
+
+  /**
+   * Constructor.
+   *
+   * @param clazzName the class name for which logging is required
+   */
+  public StandardLogService(String clazzName) {
+    String auditLogPath = AUDIT_LOG_DEFAULT_PATH;
+    String rollupSize = AUDIT_LOG_DEFAULT_ROLLING_UP_SIZE;
+    String maxBackup = AUDIT_LOG_DEFAULT_MAX_BACKUP;
+    String logLevel = AUDIT_LOG_DEFAULT_LEVEL;
+
+    Properties props = new Properties();
+    Properties carbonProps = FileUtil.getCarbonProperties();
+
+    if (null != carbonProps) {
+      if (null != carbonProps.getProperty(CARBON_AUDIT_LOG_PATH)) {
+        auditLogPath = carbonProps.getProperty(CARBON_AUDIT_LOG_PATH);
+      }
+
+      if (null != carbonProps.getProperty(CARBON_AUDIT_LOG_ROLLING_UP_SIZE)) {
+        rollupSize = carbonProps.getProperty(CARBON_AUDIT_LOG_ROLLING_UP_SIZE);
+      }
+
+      if (null != carbonProps.getProperty(CARBON_AUDIT_LOG_MAX_BACKUP)) {
+        maxBackup = carbonProps.getProperty(CARBON_AUDIT_LOG_MAX_BACKUP);
+      }
+
+      if (null != carbonProps.getProperty(CARBON_AUDIT_LOG_LEVEL)) {
+        logLevel = carbonProps.getProperty(CARBON_AUDIT_LOG_LEVEL);
+      }
+    }
+
+    props.setProperty("log4j.rootLogger", logLevel + ",stdout,AUDL");
+
+    props.setProperty("log4j.appender.stdout", "org.apache.log4j.ConsoleAppender");
+    props.setProperty("log4j.appender.stdout.layout.ConversionPattern", "%d %-5p [%c] %m%n");
+    props.setProperty("log4j.appender.stdout.layout", "org.apache.log4j.PatternLayout");
+    props.setProperty("log4j.appender.AUDL",
+        "AuditExtendedRollingFileAppender");
+
+    props.setProperty("log4j.appender.AUDL.File", auditLogPath);
+    props.setProperty("log4j.appender.AUDL.threshold",
+        "AUDIT#AuditLevel");
+    props.setProperty("log4j.appender.AUDL.layout.ConversionPattern",
+        "%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n");
+    props.setProperty("log4j.appender.AUDL.layout", "org.apache.log4j.PatternLayout");
+    props.setProperty("log4j.appender.AUDL.MaxFileSize", rollupSize);
+    props.setProperty("log4j.appender.AUDL.MaxBackupIndex", maxBackup);
+
+    props.setProperty("log4j.logger.com.huawei", logLevel + ",stdout");
+    props.setProperty("log4j.logger.com.huawei", logLevel + ",AUDL");
+
+    logger = Logger.getLogger(clazzName);
+
+  }
+
+  public StandardLogService() {
+    this("Carbon");
+  }
+
+  /**
+   * Returns whether logging is enabled.
+   *
+   * @return the doLog flag
+   */
+  public static boolean isDoLog() {
+    return doLog;
+  }
+
+  /**
+   * Enables or disables logging.
+   *
+   * @param doLog the doLog flag to set
+   */
+  public static void setDoLog(boolean doLog) {
+    StandardLogService.doLog = doLog;
+  }
+
+  public static String getPartitionID(String tableName) {
+    return tableName.substring(tableName.lastIndexOf('_') + 1, tableName.length());
+  }
+
+  public static void setThreadName(String partitionID, String queryID) {
+    StringBuffer b = new StringBuffer(PARTITION_ID);
+    b.append(partitionID);
+    if (null != queryID) {
+      b.append(";queryID:");
+      b.append(queryID);
+    }
+    b.append("]");
+    Thread.currentThread().setName(getThreadName() + b.toString());
+  }
+
+  private static String getThreadName() {
+    String name = Thread.currentThread().getName();
+    int index = name.indexOf(PARTITION_ID);
+    if (index > -1) {
+      name = name.substring(0, index);
+    } else {
+      name = '[' + name + ']';
+    }
+    return name.trim();
+  }
+
+  public boolean isDebugEnabled() {
+    return logger.isDebugEnabled();
+  }
+
+  public boolean isWarnEnabled() {
+    return logger.isEnabledFor(org.apache.log4j.Level.WARN);
+  }
+
+  public void debug(String message) {
+    if (logger.isDebugEnabled()) {
+      logMessage(Level.DEBUG, null, message);
+    }
+  }
+
+  public void error(String message) {
+    logMessage(Level.ERROR, null, message);
+  }
+
+  public void error(Throwable throwable, String message) {
+    logMessage(Level.ERROR, throwable, message);
+  }
+
+  public void error(Throwable throwable) {
+    logMessage(Level.ERROR, throwable, "");
+  }
+
+  public void info(String message) {
+    if (logger.isInfoEnabled()) {
+      logMessage(Level.INFO, null, message);
+    }
+  }
+
+  /**
+   * Utility method to log the message.
+   */
+  private void logMessage(Level logLevel, Throwable throwable, String message) {
+    if (StandardLogService.doLog) {
+      try {
+        // Append the partition id and query id if they exist
+        StringBuffer buff = new StringBuffer(Thread.currentThread().getName());
+        buff.append(" ");
+        buff.append(message);
+        message = buff.toString();
+        if (Level.ERROR.toString().equalsIgnoreCase(logLevel.toString())) {
+          logErrorMessage(throwable, message);
+        } else if (Level.DEBUG.toString().equalsIgnoreCase(logLevel.toString())) {
+          logDebugMessage(throwable, message);
+        } else if (Level.INFO.toString().equalsIgnoreCase(logLevel.toString())) {
+          logInfoMessage(throwable, message);
+        } else if (Level.WARN.toString().equalsIgnoreCase(logLevel.toString())) {
+          logWarnMessage(throwable, message);
+        } else if (Level.AUDIT.toString().equalsIgnoreCase(logLevel.toString())) {
+          audit(message);
+        } else if (Level.STATISTICS == logLevel) {
+          statistic(message);
+        }
+
+      } catch (Throwable t) {
+        logger.error(t);
+      }
+    }
+  }
+
+  private void logErrorMessage(Throwable throwable, String message) {
+
+    if (null == throwable) {
+      logger.error(message);
+    } else {
+      logger.error(message, throwable);
+    }
+  }
+
+  private void logInfoMessage(Throwable throwable, String message) {
+
+    if (null == throwable) {
+      logger.info(message);
+    } else {
+      logger.info(message, throwable);
+    }
+  }
+
+  private void logDebugMessage(Throwable throwable, String message) {
+
+    if (null == throwable) {
+      logger.debug(message);
+    } else {
+      logger.debug(message, throwable);
+    }
+  }
+
+  private void logWarnMessage(Throwable throwable, String message) {
+
+    if (null == throwable) {
+      logger.warn(message);
+    } else {
+      logger.warn(message, throwable);
+    }
+  }
+
+  public boolean isInfoEnabled() {
+    return logger.isInfoEnabled();
+  }
+
+  public void warn(String message) {
+    if (isWarnEnabled()) {
+      logMessage(Level.WARN, null, message);
+    }
+  }
+
+  public void setEventProperties(String propertyName, String propertyValue) {
+    MDC.put(propertyName, propertyValue);
+  }
+
+  /**
+   * Logs an audit message.
+   *
+   * @param msg audit log message
+   */
+  @Override public void audit(String msg) {
+    String hostName = "";
+
+    try {
+      hostName = InetAddress.getLocalHost().getHostName();
+    } catch (UnknownHostException e) {
+      hostName = "localhost";
+    }
+    String username = "unknown";
+    String threadid = "unknown";
+    try {
+      threadid = Thread.currentThread().getId() + "";
+      username = UserGroupInformation.getCurrentUser().getShortUserName();
+    } catch (IOException e) {
+      username = "unknown";
+    }
+    logger.log(AuditLevel.AUDIT,
+        "[" + hostName + "]" + "[" + username + "]" + "[Thread-" + threadid + "]" + msg);
+  }
+
+  @Override public void statistic(String message) {
+    logger.log(StatisticLevel.STATISTIC, message);
+  }
+
+  /**
+   * Specifies the logging level.
+   */
+  enum Level {
+
+    NONE(0),
+    DEBUG(1),
+    INFO(2),
+    STATISTICS(3),
+    ERROR(4),
+    AUDIT(5),
+    WARN(6);
+
+    /**
+     * Constructor.
+     *
+     * @param level
+     */
+    Level(final int level) {
+    }
+  }
+}
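
The renamed service is obtained through LogServiceFactory, which the unit test later in this patch exercises; a minimal sketch, with LoadStep as a hypothetical caller:

    import org.apache.carbondata.common.logging.LogService;
    import org.apache.carbondata.common.logging.LogServiceFactory;
    import org.apache.carbondata.common.logging.impl.StandardLogService;

    public class LoadStep {
      private static final LogService LOGGER =
          LogServiceFactory.getLogService(LoadStep.class.getName());

      public void run(String partitionId, String queryId) {
        // logMessage() prepends the thread name, so tag the thread first.
        StandardLogService.setThreadName(partitionId, queryId);
        LOGGER.info("data load started");
        LOGGER.audit("data load requested");  // routed at AuditLevel.AUDIT
      }
    }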

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/apache/carbondata/common/logging/impl/StatisticLevel.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/carbondata/common/logging/impl/StatisticLevel.java b/common/src/main/java/org/apache/carbondata/common/logging/impl/StatisticLevel.java
new file mode 100644
index 0000000..163db51
--- /dev/null
+++ b/common/src/main/java/org/apache/carbondata/common/logging/impl/StatisticLevel.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.common.logging.impl;
+
+import org.apache.log4j.Level;
+
+/**
+ * Extended log level class to log the statistic details
+ */
+public class StatisticLevel extends Level {
+
+  public static final StatisticLevel STATISTIC = new StatisticLevel(55000, "STATISTIC", 0);
+
+  private static final long serialVersionUID = -209614723183147373L;
+
+  /**
+   * Constructor
+   *
+   * @param level            log level
+   * @param levelStr         log level string
+   * @param syslogEquivalent syslogEquivalent
+   */
+  protected StatisticLevel(int level, String levelStr, int syslogEquivalent) {
+    super(level, levelStr, syslogEquivalent);
+  }
+
+  /**
+   * Returns custom level for debug type log message
+   *
+   * @param val          value
+   * @param defaultLevel level
+   * @return custom level
+   */
+  public static StatisticLevel toLevel(int val, Level defaultLevel) {
+    return STATISTIC;
+  }
+
+  /**
+   * Returns custom level for debug type log message
+   *
+   * @param sArg         sArg
+   * @param defaultLevel level
+   * @return custom level
+   */
+  public static StatisticLevel toLevel(String sArg, Level defaultLevel) {
+    return STATISTIC;
+  }
+}
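
Since both toLevel overloads above ignore their arguments and always return STATISTIC, statistic logging is driven entirely through Logger.log; a short sketch (the demo class and logger name are illustrative):

    import org.apache.carbondata.common.logging.impl.StatisticLevel;
    import org.apache.log4j.Logger;

    public class StatisticDemo {
      public static void main(String[] args) {
        Logger logger = Logger.getLogger("carbon.stats");
        // STATISTIC (55000) sits above FATAL (50000), so it passes any
        // standard log4j threshold.
        logger.log(StatisticLevel.STATISTIC, "block scan took 42 ms");
      }
    }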

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/carbondata/common/CarbonIterator.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/carbondata/common/CarbonIterator.java b/common/src/main/java/org/carbondata/common/CarbonIterator.java
deleted file mode 100644
index 2ac2ff3..0000000
--- a/common/src/main/java/org/carbondata/common/CarbonIterator.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common;
-
-import java.util.Iterator;
-
-/**
- * CarbonIterator adds default implement for remove. This is required for Java 7.
- * @param <E>
- */
-public abstract class CarbonIterator<E> implements Iterator<E> {
-
-  @Override public abstract boolean hasNext();
-
-  @Override public abstract E next();
-
-  @Override public void remove() {
-    throw new UnsupportedOperationException("remove");
-  }
-
-}
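
CarbonIterator exists so that Java 7 implementers supply only hasNext() and next(); a minimal sketch against the renamed org.apache.carbondata.common package, with RangeIterator as an illustrative subclass:

    import org.apache.carbondata.common.CarbonIterator;

    public class RangeIterator extends CarbonIterator<Integer> {
      private int next;
      private final int end;

      public RangeIterator(int start, int end) {
        this.next = start;
        this.end = end;
      }

      @Override public boolean hasNext() {
        return next < end;
      }

      @Override public Integer next() {
        // remove() is inherited and throws UnsupportedOperationException
        return next++;
      }
    }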

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/carbondata/common/logging/LogService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/carbondata/common/logging/LogService.java b/common/src/main/java/org/carbondata/common/logging/LogService.java
deleted file mode 100644
index 1dcff57..0000000
--- a/common/src/main/java/org/carbondata/common/logging/LogService.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging;
-
-/**
- * for Log Services
- */
-public interface LogService {
-
-  void debug(String message);
-
-  void info(String message);
-
-  void warn(String message);
-
-  void error(String message);
-
-  void error(Throwable throwable);
-
-  void error(Throwable throwable, String message);
-
-  void audit(String message);
-
-  /**
-   * Below method will be used to log the statistic information
-   *
-   * @param message statistic message
-   */
-  void statistic(String message);
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/carbondata/common/logging/LogServiceFactory.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/carbondata/common/logging/LogServiceFactory.java b/common/src/main/java/org/carbondata/common/logging/LogServiceFactory.java
deleted file mode 100644
index 471f80c..0000000
--- a/common/src/main/java/org/carbondata/common/logging/LogServiceFactory.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging;
-
-import org.carbondata.common.logging.impl.StandardLogService;
-
-/**
- * Log Service factory
- */
-public final class LogServiceFactory {
-  private LogServiceFactory() {
-
-  }
-
-  /**
-   * return Logger Service.
-   *
-   * @param className provides class name
-   * @return LogService
-   */
-  public static LogService getLogService(final String className) {
-    return new StandardLogService(className);
-  }
-
-  public static LogService getLogService() {
-    return new StandardLogService();
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/carbondata/common/logging/impl/AuditExtendedRollingFileAppender.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/carbondata/common/logging/impl/AuditExtendedRollingFileAppender.java b/common/src/main/java/org/carbondata/common/logging/impl/AuditExtendedRollingFileAppender.java
deleted file mode 100644
index eafd677..0000000
--- a/common/src/main/java/org/carbondata/common/logging/impl/AuditExtendedRollingFileAppender.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging.impl;
-
-import org.apache.log4j.spi.LoggingEvent;
-
-/**
- * Copied form log4j and modified for renaming files and restriction only for
- * audit logging
- */
-public class AuditExtendedRollingFileAppender extends ExtendedRollingFileAppender {
-
-  /**g
-   * Call RollingFileAppender method to append the log...
-   *
-   * @see org.apache.log4j.RollingFileAppender#subAppend(LoggingEvent)
-   */
-  protected void subAppend(LoggingEvent event) {
-    if (event.getLevel().toInt() == AuditLevel.AUDIT.toInt()) {
-      currentLevel = AuditLevel.AUDIT.toInt();
-      super.subAppend(event);
-    }
-  }
-}
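
The subAppend override above forwards only events whose level is exactly AUDIT; the pairing threshold is configured with log4j's value#classname syntax, under which log4j resolves the level string via the named class's static toLevel(String, Level). A configuration sketch mirroring the keys set in the renamed StandardLogService:

    log4j.appender.AUDL=org.apache.carbondata.common.logging.impl.AuditExtendedRollingFileAppender
    log4j.appender.AUDL.File=logs/CarbonAudit.log
    # AUDIT#<class> asks log4j to resolve "AUDIT" through that class's toLevel()
    log4j.appender.AUDL.threshold=AUDIT#org.apache.carbondata.common.logging.impl.AuditLevel
    log4j.appender.AUDL.layout=org.apache.log4j.PatternLayout
    log4j.appender.AUDL.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n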

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/carbondata/common/logging/impl/AuditLevel.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/carbondata/common/logging/impl/AuditLevel.java b/common/src/main/java/org/carbondata/common/logging/impl/AuditLevel.java
deleted file mode 100644
index f15f8cf..0000000
--- a/common/src/main/java/org/carbondata/common/logging/impl/AuditLevel.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging.impl;
-
-import org.apache.log4j.Level;
-
-public class AuditLevel extends Level {
-
-  public static final AuditLevel AUDIT = new AuditLevel(55000, "AUDIT", 0);
-  private static final long serialVersionUID = -209614723183147373L;
-
-  /**
-   * Constructor
-   *
-   * @param level            log level
-   * @param levelStr         log level string
-   * @param syslogEquivalent syslogEquivalent
-   */
-  protected AuditLevel(int level, String levelStr, int syslogEquivalent) {
-    super(level, levelStr, syslogEquivalent);
-  }
-
-  /**
-   * Returns custom level for debug type log message
-   *
-   * @param val          value
-   * @param defaultLevel level
-   * @return custom level
-   */
-  public static AuditLevel toLevel(int val, Level defaultLevel) {
-    return AUDIT;
-  }
-
-  /**
-   * Returns custom level for debug type log message
-   *
-   * @param sArg         sArg
-   * @param defaultLevel level
-   * @return custom level
-   */
-  public static AuditLevel toLevel(String sArg, Level defaultLevel) {
-    return AUDIT;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/carbondata/common/logging/impl/ExtendedRollingFileAppender.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/carbondata/common/logging/impl/ExtendedRollingFileAppender.java b/common/src/main/java/org/carbondata/common/logging/impl/ExtendedRollingFileAppender.java
deleted file mode 100644
index 46afe9b..0000000
--- a/common/src/main/java/org/carbondata/common/logging/impl/ExtendedRollingFileAppender.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging.impl;
-
-import java.io.File;
-import java.io.FileFilter;
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.Locale;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-
-import org.apache.log4j.Level;
-import org.apache.log4j.RollingFileAppender;
-import org.apache.log4j.helpers.CountingQuietWriter;
-import org.apache.log4j.helpers.LogLog;
-import org.apache.log4j.spi.LoggingEvent;
-
-/**
- * Copied from log4j to remove the hard coding for the file name from it Copied
- * form log4j and modified for renaming files
- */
-public class ExtendedRollingFileAppender extends RollingFileAppender {
-
-  private static final String DATE_FORMAT_FOR_TRANSFER = "yyyy-MM-dd'_'HH-mm-ss";
-  protected int currentLevel = Level.FATAL_INT;
-  /**
-   * Added for DTS DTS2011122001074 Now in log file rolling(after file size
-   * exceeded the threshold) and deletion (after file count exceeded the file
-   * count threshold) it will print log message
-   */
-
-  private long nextRollover = 0;
-  private boolean cleanupInProgress = false;
-
-  /**
-   * Total number of files at any point of time should be Backup number of
-   * files + current file
-   */
-  private static void cleanLogs(final String startName, final String folderPath,
-      int maxBackupIndex) {
-    final String fileStartName = startName.toLowerCase(Locale.US);
-    // Delete the oldest file, to keep Windows happy.
-    File file = new File(folderPath);
-
-    if (file.exists()) {
-      File[] files = file.listFiles(new FileFilter() {
-
-        public boolean accept(File file) {
-          if (!file.isDirectory() && file.getName().toLowerCase(Locale.US)
-              .startsWith(fileStartName)) {
-            return true;
-          }
-          return false;
-        }
-      });
-
-      int backupFiles = files.length - 1;
-
-      if (backupFiles <= maxBackupIndex) {
-        return;
-      }
-
-      // Sort the file based on its name.
-      TreeMap<String, File> sortedMap = new TreeMap<String, File>();
-      for (File file1 : files) {
-        sortedMap.put(file1.getName(), file1);
-      }
-
-      // Remove the first log file from map. it will be <startName>.log
-      // itself which will be backed up in rollover
-      sortedMap.remove(sortedMap.firstKey());
-
-      Iterator<Entry<String, File>> it = sortedMap.entrySet().iterator();
-      Entry<String, File> temp = null;
-
-      // After clean up the files should be maxBackupIndex -1 number of
-      // files. Because one more backup file
-      // will be created after this method call is over
-      while (it.hasNext() && backupFiles > maxBackupIndex) {
-        temp = it.next();
-        File deleteFile = temp.getValue();
-        // Delete the file
-        // Fixed defect DTS2011122001074 after deletion of log file it
-        // will print the log message in ReportService.log
-        if (deleteFile.delete()) {
-          backupFiles--;
-        } else {
-          LogLog.error("Couldn't delete file :: " + deleteFile.getPath());
-        }
-      }
-    }
-  }
-
-  /**
-   * Copied from log4j to remove hardcoding of file name
-   */
-  public void rollOver() {
-    File target;
-    File file = new File(fileName);
-
-    String fileStartName = file.getName();
-    int dotIndex = fileStartName.indexOf('.');
-
-    if (dotIndex != -1) {
-      fileStartName = fileStartName.substring(0, dotIndex);
-    }
-    final String startName = fileStartName;
-    final String folderPath = file.getParent();
-
-    if (qw != null) {
-      long size = ((CountingQuietWriter) qw).getCount();
-      LogLog.debug("rolling over count=" + size);
-      // if operation fails, do not roll again until
-      // maxFileSize more bytes are written
-      nextRollover = size + maxFileSize;
-    }
-
-    LogLog.debug("maxBackupIndex=" + maxBackupIndex);
-
-    boolean renameSucceeded = true;
-
-    // If maxBackups <= 0, then there is no file renaming to be done.
-    if (maxBackupIndex > 0) {
-      DateFormat dateFormat = new SimpleDateFormat(DATE_FORMAT_FOR_TRANSFER);
-
-      StringBuffer buffer = new StringBuffer();
-      String extension = "";
-      if (fileName.contains(".")) {
-        extension = fileName.substring(fileName.lastIndexOf("."));
-        buffer.append(fileName.substring(0, fileName.lastIndexOf(".")));
-      } else {
-        buffer.append(fileName);
-      }
-      buffer.append("_").append(dateFormat.format(new Date())).append(extension);
-      // Rename fileName to fileName.1
-      target = new File(buffer.toString());
-
-      this.closeFile(); // keep windows happy.
-
-      LogLog.debug("Renaming file " + file + " to " + target);
-      renameSucceeded = file.renameTo(target);
-
-      //
-      // if file rename failed, reopen file with append = true
-      //
-      if (!renameSucceeded) {
-        try {
-          this.setFile(fileName, true, bufferedIO, bufferSize);
-        } catch (InterruptedIOException e) {
-          Thread.currentThread().interrupt();
-        } catch (IOException e) {
-          LogLog.error("setFile(" + fileName + ", true) call failed.", e);
-        }
-      }
-    }
-
-    //
-    // if all renames were successful, then
-    //
-    if (renameSucceeded) {
-      try {
-        // This will also close the file. This is OK since multiple
-        // close operations are safe.
-        this.setFile(fileName, false, bufferedIO, bufferSize);
-        nextRollover = 0;
-      } catch (InterruptedIOException e) {
-        Thread.currentThread().interrupt();
-      } catch (IOException e) {
-        LogLog.error("setFile(" + fileName + ", false) call failed.", e);
-      }
-    }
-
-    // Do clean up finally
-    cleanUpLogs(startName, folderPath);
-  }
-
-  private void cleanUpLogs(final String startName, final String folderPath) {
-    if (maxBackupIndex > 0) {
-      // Clean the logs files
-      Runnable r = new Runnable() {
-
-        public void run() {
-          if (cleanupInProgress) {
-            return;
-          }
-          synchronized (ExtendedRollingFileAppender.class) {
-            cleanupInProgress = true;
-            try {
-              cleanLogs(startName, folderPath, maxBackupIndex);
-            } catch (Throwable e) {
-              // ignore any error
-              LogLog.error("Cleaning logs failed", e);
-            } finally {
-              cleanupInProgress = false;
-            }
-          }
-        }
-      };
-
-      Thread t = new Thread(r);
-      t.start();
-    }
-  }
-
-  protected void subAppend(LoggingEvent event) {
-    if (event.getLevel().toInt() <= currentLevel) {
-      super.subAppend(event);
-      if (fileName != null && qw != null) {
-        long size = ((CountingQuietWriter) qw).getCount();
-        if (size >= maxFileSize && size >= nextRollover) {
-          rollOver();
-        }
-      }
-    }
-  }
-
-}
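
rollOver() above renames the live file with a timestamp rather than log4j's fixed .1 suffix, then reopens a fresh file and asynchronously prunes old backups. A small sketch of the backup name it produces (RolloverName is illustrative):

    import java.text.SimpleDateFormat;
    import java.util.Date;

    public class RolloverName {
      public static void main(String[] args) {
        String fileName = "logs/CarbonAudit.log";
        String extension = fileName.substring(fileName.lastIndexOf('.'));  // ".log"
        String base = fileName.substring(0, fileName.lastIndexOf('.'));
        String stamp = new SimpleDateFormat("yyyy-MM-dd'_'HH-mm-ss").format(new Date());
        // e.g. logs/CarbonAudit_2016-08-15_07-08-46.log
        System.out.println(base + "_" + stamp + extension);
      }
    }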

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/carbondata/common/logging/impl/FileUtil.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/carbondata/common/logging/impl/FileUtil.java b/common/src/main/java/org/carbondata/common/logging/impl/FileUtil.java
deleted file mode 100644
index 62ee4ac..0000000
--- a/common/src/main/java/org/carbondata/common/logging/impl/FileUtil.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging.impl;
-
-import java.io.Closeable;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.Properties;
-
-import org.apache.log4j.Logger;
-
-/**
- * Provides file Utility
- */
-public final class FileUtil {
-
-  public static final String CARBON_PROPERTIES_FILE_PATH = "../../../conf/carbon.properties";
-  private static final Logger LOG = Logger.getLogger(FileUtil.class.getName());
-  private static Properties carbonProperties;
-
-  private FileUtil() {
-
-  }
-
-  public static Properties getCarbonProperties() {
-    if (null == carbonProperties) {
-      loadProperties();
-    }
-
-    return carbonProperties;
-  }
-
-  /**
-   * closes the stream
-   *
-   * @param stream stream to be closed.
-   */
-  public static void close(Closeable stream) {
-    if (null != stream) {
-      try {
-        stream.close();
-      } catch (IOException e) {
-        LOG.error("Exception while closing the Log stream");
-      }
-    }
-  }
-
-  private static void loadProperties() {
-    String property = System.getProperty("carbon.properties.filepath");
-    if (null == property) {
-      property = CARBON_PROPERTIES_FILE_PATH;
-    }
-    File file = new File(property);
-
-    FileInputStream fis = null;
-    try {
-      if (file.exists()) {
-        fis = new FileInputStream(file);
-
-        carbonProperties = new Properties();
-        carbonProperties.load(fis);
-      }
-    } catch (FileNotFoundException e) {
-      LOG.error("Could not find carbon properties file in the path " + property);
-    } catch (IOException e) {
-      LOG.error("Error while reading carbon properties file in the path " + property);
-    } finally {
-      if (null != fis) {
-        try {
-          fis.close();
-        } catch (IOException e) {
-          LOG.error("Error while closing the file stream for carbon.properties");
-        }
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/carbondata/common/logging/impl/StandardLogService.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/carbondata/common/logging/impl/StandardLogService.java b/common/src/main/java/org/carbondata/common/logging/impl/StandardLogService.java
deleted file mode 100644
index dbc25f5..0000000
--- a/common/src/main/java/org/carbondata/common/logging/impl/StandardLogService.java
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging.impl;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.Properties;
-
-import org.carbondata.common.logging.LogService;
-
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.log4j.Logger;
-import org.apache.log4j.MDC;
-
-/**
- * Default Implementation of the <code>LogService</code>
- */
-public final class StandardLogService implements LogService {
-
-  private static final String PARTITION_ID = "[partitionID:";
-  private static final String CARBON_AUDIT_LOG_PATH = "carbon.auditlog.file.path";
-  private static final String AUDIT_LOG_DEFAULT_PATH = "logs/CarbonAudit.log";
-  private static final String CARBON_AUDIT_LOG_ROLLING_UP_SIZE = "carbon.auditlog.max.file.size";
-  private static final String AUDIT_LOG_DEFAULT_ROLLING_UP_SIZE = "10MB";
-  private static final String CARBON_AUDIT_LOG_MAX_BACKUP = "carbon.auditlog.max.backup.files";
-  private static final String AUDIT_LOG_DEFAULT_MAX_BACKUP = "10";
-  private static final String CARBON_AUDIT_LOG_LEVEL = "carbon.logging.level";
-  private static final String AUDIT_LOG_DEFAULT_LEVEL = "INFO";
-  private static boolean doLog = true;
-  private Logger logger;
-
-  /**
-   * Constructor.
-   *
-   * @param clazzName for which the Logging is required
-   */
-  public StandardLogService(String clazzName) {
-    String auditLogPath = AUDIT_LOG_DEFAULT_PATH;
-    String rollupSize = AUDIT_LOG_DEFAULT_ROLLING_UP_SIZE;
-    String maxBackup = AUDIT_LOG_DEFAULT_MAX_BACKUP;
-    String logLevel = AUDIT_LOG_DEFAULT_LEVEL;
-
-    Properties props = new Properties();
-    Properties carbonProps = FileUtil.getCarbonProperties();
-
-    if (null != carbonProps) {
-      if (null != carbonProps.getProperty(CARBON_AUDIT_LOG_PATH)) {
-        auditLogPath = carbonProps.getProperty(CARBON_AUDIT_LOG_PATH);
-      }
-
-      if (null != carbonProps.getProperty(CARBON_AUDIT_LOG_ROLLING_UP_SIZE)) {
-        rollupSize = carbonProps.getProperty(CARBON_AUDIT_LOG_ROLLING_UP_SIZE);
-      }
-
-      if (null != carbonProps.getProperty(CARBON_AUDIT_LOG_MAX_BACKUP)) {
-        maxBackup = carbonProps.getProperty(CARBON_AUDIT_LOG_MAX_BACKUP);
-      }
-
-      if (null != carbonProps.getProperty(CARBON_AUDIT_LOG_LEVEL)) {
-        logLevel = carbonProps.getProperty(CARBON_AUDIT_LOG_LEVEL);
-      }
-    }
-
-    props.setProperty("log4j.rootLogger", logLevel + ",stdout,AUDL");
-
-    props.setProperty("log4j.appender.stdout", "org.apache.log4j.ConsoleAppender");
-    props.setProperty("log4j.appender.stdout.layout.ConversionPattern", "%d %-5p [%c] %m%n");
-    props.setProperty("log4j.appender.stdout.layout", "org.apache.log4j.PatternLayout");
-    props.setProperty("log4j.appender.AUDL",
-        "org.carbondata.common.logging.impl.AuditExtendedRollingFileAppender");
-
-    props.setProperty("log4j.appender.AUDL.File", auditLogPath);
-    props.setProperty("log4j.appender.AUDL.threshold",
-        "AUDIT#org.carbondata.common.logging.impl.AuditLevel");
-    props.setProperty("log4j.appender.AUDL.layout.ConversionPattern",
-        "%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n");
-    props.setProperty("log4j.appender.AUDL.layout", "org.apache.log4j.PatternLayout");
-    props.setProperty("log4j.appender.AUDL.MaxFileSize", rollupSize);
-    props.setProperty("log4j.appender.AUDL.MaxBackupIndex", maxBackup);
-
-    props.setProperty("log4j.logger.com.huawei", logLevel + ",stdout");
-    props.setProperty("log4j.logger.com.huawei", logLevel + ",AUDL");
-
-    logger = Logger.getLogger(clazzName);
-
-  }
-
-  public StandardLogService() {
-    this("Carbon");
-  }
-
-  /**
-   * returns is DO Log
-   *
-   * @return the doLog
-   */
-  public static boolean isDoLog() {
-    return doLog;
-  }
-
-  /**
-   * set Do Log
-   *
-   * @param doLog the doLog to set
-   */
-  public static void setDoLog(boolean doLog) {
-    StandardLogService.doLog = doLog;
-  }
-
-  public static String getPartitionID(String tableName) {
-    return tableName.substring(tableName.lastIndexOf('_') + 1, tableName.length());
-  }
-
-  public static void setThreadName(String partitionID, String queryID) {
-    StringBuffer b = new StringBuffer(PARTITION_ID);
-    b.append(partitionID);
-    if (null != queryID) {
-      b.append(";queryID:");
-      b.append(queryID);
-    }
-    b.append("]");
-    Thread.currentThread().setName(getThreadName() + b.toString());
-  }
-
-  private static String getThreadName() {
-    String name = Thread.currentThread().getName();
-    int index = name.indexOf(PARTITION_ID);
-    if (index > -1) {
-      name = name.substring(0, index);
-    } else {
-      name = '[' + name + ']';
-    }
-    return name.trim();
-  }
-
-  public boolean isDebugEnabled() {
-    return logger.isDebugEnabled();
-  }
-
-  public boolean isWarnEnabled() {
-    return logger.isEnabledFor(org.apache.log4j.Level.WARN);
-  }
-
-  public void debug(String message) {
-    if (logger.isDebugEnabled()) {
-      logMessage(Level.DEBUG, null, message);
-    }
-  }
-
-  public void error(String message) {
-    logMessage(Level.ERROR, null, message);
-  }
-
-  public void error(Throwable throwable, String message) {
-    logMessage(Level.ERROR, throwable, message);
-  }
-
-  public void error(Throwable throwable) {
-    logMessage(Level.ERROR, throwable, "");
-  }
-
-  public void info(String message) {
-    if (logger.isInfoEnabled()) {
-      logMessage(Level.INFO, null, message);
-    }
-  }
-
-  /**
-   * Utility Method to log the the Message.
-   */
-  private void logMessage(Level logLevel, Throwable throwable, String message) {
-    if (StandardLogService.doLog) {
-      try {
-        //Append the partition id and query id if exist
-        StringBuffer buff = new StringBuffer(Thread.currentThread().getName());
-        buff.append(" ");
-        buff.append(message);
-        message = buff.toString();
-        if (Level.ERROR.toString().equalsIgnoreCase(logLevel.toString())) {
-          logErrorMessage(throwable, message);
-        } else if (Level.DEBUG.toString().equalsIgnoreCase(logLevel.toString())) {
-          logDebugMessage(throwable, message);
-        } else if (Level.INFO.toString().equalsIgnoreCase(logLevel.toString())) {
-          logInfoMessage(throwable, message);
-        } else if (Level.WARN.toString().equalsIgnoreCase(logLevel.toString())) {
-          logWarnMessage(throwable, message);
-        } else if (Level.AUDIT.toString().equalsIgnoreCase(logLevel.toString())) {
-          audit(message);
-        } else if (Level.STATISTICS == logLevel) {
-          statistic(message);
-        }
-
-      } catch (Throwable t) {
-        logger.error(t);
-      }
-    }
-  }
-
-  private void logErrorMessage(Throwable throwable, String message) {
-
-    if (null == throwable) {
-      logger.error(message);
-    } else {
-      logger.error(message, throwable);
-    }
-  }
-
-  private void logInfoMessage(Throwable throwable, String message) {
-
-    if (null == throwable) {
-      logger.info(message);
-    } else {
-      logger.info(message, throwable);
-    }
-  }
-
-  private void logDebugMessage(Throwable throwable, String message) {
-
-    if (null == throwable) {
-      logger.debug(message);
-    } else {
-      logger.debug(message, throwable);
-    }
-  }
-
-  private void logWarnMessage(Throwable throwable, String message) {
-
-    if (null == throwable) {
-      logger.warn(message);
-    } else {
-      logger.warn(message, throwable);
-    }
-  }
-
-  public boolean isInfoEnabled() {
-    return logger.isInfoEnabled();
-  }
-
-  public void warn(String message) {
-    if (isWarnEnabled()) {
-      logMessage(Level.WARN, null, message);
-    }
-  }
-
-  public void setEventProperties(String propertyName, String propertyValue) {
-    MDC.put(propertyName, propertyValue);
-  }
-
-  /**
-   * log audit log
-   *
-   * @param msg audit log message
-   */
-  @Override public void audit(String msg) {
-    String hostName = "";
-
-    try {
-      hostName = InetAddress.getLocalHost().getHostName();
-    } catch (UnknownHostException e) {
-      hostName = "localhost";
-    }
-    String username = "unknown";
-    String threadid = "unknown";
-    try {
-      threadid = Thread.currentThread().getId() + "";
-      username = UserGroupInformation.getCurrentUser().getShortUserName();
-    } catch (IOException e) {
-      username = "unknown";
-    }
-    logger.log(AuditLevel.AUDIT,
-        "[" + hostName + "]" + "[" + username + "]" + "[Thread-" + threadid + "]" + msg);
-  }
-
-  @Override public void statistic(String message) {
-    logger.log(StatisticLevel.STATISTIC, message);
-  }
-
-  /**
-   * Specifies the logging level.
-   */
-  enum Level {
-
-    NONE(0),
-    DEBUG(1),
-    INFO(2),
-    STATISTICS(3),
-    ERROR(4),
-    AUDIT(5),
-    WARN(6);
-
-    /**
-     * Constructor.
-     *
-     * @param level
-     */
-    Level(final int level) {
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/main/java/org/carbondata/common/logging/impl/StatisticLevel.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/carbondata/common/logging/impl/StatisticLevel.java b/common/src/main/java/org/carbondata/common/logging/impl/StatisticLevel.java
deleted file mode 100644
index 211d055..0000000
--- a/common/src/main/java/org/carbondata/common/logging/impl/StatisticLevel.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.common.logging.impl;
-
-import org.apache.log4j.Level;
-
-/**
- * Extended log level class to log the statistic details
- */
-public class StatisticLevel extends Level {
-
-  public static final StatisticLevel STATISTIC = new StatisticLevel(55000, "STATISTIC", 0);
-
-  private static final long serialVersionUID = -209614723183147373L;
-
-  /**
-   * Constructor
-   *
-   * @param level            log level
-   * @param levelStr         log level string
-   * @param syslogEquivalent syslogEquivalent
-   */
-  protected StatisticLevel(int level, String levelStr, int syslogEquivalent) {
-    super(level, levelStr, syslogEquivalent);
-  }
-
-  /**
-   * Returns custom level for debug type log message
-   *
-   * @param val          value
-   * @param defaultLevel level
-   * @return custom level
-   */
-  public static StatisticLevel toLevel(int val, Level defaultLevel) {
-    return STATISTIC;
-  }
-
-  /**
-   * Returns custom level for debug type log message
-   *
-   * @param sArg         sArg
-   * @param defaultLevel level
-   * @return custom level
-   */
-  public static StatisticLevel toLevel(String sArg, Level defaultLevel) {
-    return STATISTIC;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/log4j.properties
----------------------------------------------------------------------
diff --git a/common/src/test/java/log4j.properties b/common/src/test/java/log4j.properties
index 8d966f0..3d35725 100644
--- a/common/src/test/java/log4j.properties
+++ b/common/src/test/java/log4j.properties
@@ -16,9 +16,9 @@
 # limitations under the License.
 #
 log4j.logger.com.huawei=INFO,R5
-log4j.appender.R5=org.carbondata.common.logging.impl.AuditExtendedRollingFileAppender
+log4j.appender.R5=org.apache.carbondata.common.logging.impl.AuditExtendedRollingFileAppender
 log4j.appender.R5.File=./unibiaudit.log
-log4j.appender.R5.threshold=AUDIT#org.carbondata.common.logging.AuditLevel
+log4j.appender.R5.threshold=AUDIT#org.apache.carbondata.common.logging.AuditLevel
 log4j.appender.R5.layout=org.apache.log4j.PatternLayout
 log4j.appender.R5.layout.ConversionPattern=%d [%t] %p [%c] %X{CLIENT_IP} %X{USER_NAME} %X{MODULE} %X{OPERATRION}- %m%n
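
The %X{...} conversions in this pattern read log4j's MDC, which LoggingServiceTest_FT below seeds in setUp(); the OPERATRION key is spelled identically in the pattern and the test, and the two must match for the value to appear. A minimal sketch (the logger name is illustrative):

    import org.apache.log4j.Logger;
    import org.apache.log4j.MDC;

    public class MdcSeeding {
      public static void main(String[] args) {
        MDC.put("CLIENT_IP", "127.0.0.1");
        MDC.put("USER_NAME", "testuser");
        MDC.put("MODULE", "Function Test");
        MDC.put("OPERATRION", "log");  // matches %X{OPERATRION} above
        Logger.getLogger("com.huawei.sample").info("hello");
      }
    }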
 

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/apache/carbondata/common/logging/LogServiceFactoryTest_UT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/carbondata/common/logging/LogServiceFactoryTest_UT.java b/common/src/test/java/org/apache/carbondata/common/logging/LogServiceFactoryTest_UT.java
new file mode 100644
index 0000000..66248c7
--- /dev/null
+++ b/common/src/test/java/org/apache/carbondata/common/logging/LogServiceFactoryTest_UT.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging;
+
+import org.apache.carbondata.common.logging.impl.StandardLogService;
+
+import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class LogServiceFactoryTest_UT extends TestCase {
+
+  @Before public void setUp() throws Exception {
+  }
+
+  @After public void tearDown() throws Exception {
+  }
+
+  @Test public void testGetLogService() {
+    LogService logger = LogServiceFactory.getLogService("sampleclass");
+    assertTrue(logger instanceof StandardLogService);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/apache/carbondata/common/logging/ft/LoggingServiceTest_FT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/carbondata/common/logging/ft/LoggingServiceTest_FT.java b/common/src/test/java/org/apache/carbondata/common/logging/ft/LoggingServiceTest_FT.java
new file mode 100644
index 0000000..fadb0b8
--- /dev/null
+++ b/common/src/test/java/org/apache/carbondata/common/logging/ft/LoggingServiceTest_FT.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging.ft;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+
+import junit.framework.TestCase;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.MDC;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class LoggingServiceTest_FT extends TestCase {
+
+  private static LogService logger =
+      LogServiceFactory.getLogService(LoggingServiceTest_FT.class.getName());
+
+  @Before public void setUp() throws Exception {
+    MDC.put("MODULE", "Function Test");
+    MDC.put("USER_NAME", "testuser");
+    MDC.put("CLIENT_IP", "127.0.0.1");
+    MDC.put("OPERATRION", "log");
+  }
+
+  @Test public void testIsAuditFileCreated() {
+    File f = new File("./unibiaudit.log");
+    Assert.assertFalse(f.exists());
+  }
+
+  @Test public void testAudit() {
+
+    String expectedAuditLine =
+        "[main] AUDIT [com.huawei.iweb.platform.logging.ft.LoggingServiceTest_FT] 127.0.0.1 "
+            + "testuser Function Test log- audit message created";
+    logger.audit("audit message created");
+
+    LogManager.shutdown();
+
+    try {
+      FileInputStream fstream = new FileInputStream("./unibiaudit.log");
+      BufferedReader br = new BufferedReader(new InputStreamReader(fstream));
+      String actualAuditLine = null;
+      String strLine = null;
+      while ((strLine = br.readLine()) != null) {
+        actualAuditLine = strLine;
+      }
+
+      System.out.println(actualAuditLine);
+
+      if (actualAuditLine != null) {
+        int index = actualAuditLine.indexOf("[main]");
+        actualAuditLine = actualAuditLine.substring(index);
+        Assert.assertEquals(expectedAuditLine, actualAuditLine);
+      } else {
+        Assert.assertTrue(false);
+      }
+    } catch (FileNotFoundException e) {
+      e.printStackTrace();
+      Assert.assertTrue(true);
+    } catch (IOException e) {
+      e.printStackTrace();
+      Assert.assertTrue(false);
+    }
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/apache/carbondata/common/logging/impl/AuditExtendedRollingFileAppenderTest_UT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/carbondata/common/logging/impl/AuditExtendedRollingFileAppenderTest_UT.java b/common/src/test/java/org/apache/carbondata/common/logging/impl/AuditExtendedRollingFileAppenderTest_UT.java
new file mode 100644
index 0000000..4032ddb
--- /dev/null
+++ b/common/src/test/java/org/apache/carbondata/common/logging/impl/AuditExtendedRollingFileAppenderTest_UT.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging.impl;
+
+import junit.framework.Assert;
+import mockit.Deencapsulation;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class AuditExtendedRollingFileAppenderTest_UT {
+
+  private AuditExtendedRollingFileAppender rAppender = null;
+
+  @Before public void setUp() throws Exception {
+    rAppender = new AuditExtendedRollingFileAppender();
+    Deencapsulation.setField(rAppender, "fileName", "audit.log");
+    Deencapsulation.setField(rAppender, "maxBackupIndex", 1);
+    Deencapsulation.setField(rAppender, "maxFileSize", 1000L);
+
+  }
+
+  @After public void tearDown() throws Exception {
+
+  }
+
+  @Test public void testRollOver() {
+    rAppender.rollOver();
+    rAppender.rollOver();
+    rAppender.rollOver();
+    Assert.assertTrue(true);
+  }
+
+  @Test public void testCleanLogs() {
+    final String startName = "audit";
+    final String folderPath = "./";
+    int maxBackupIndex = 1;
+
+    Deencapsulation.invoke(rAppender, "cleanLogs", startName, folderPath, maxBackupIndex);
+    Assert.assertTrue(true);
+  }
+
+  @Test public void testSubAppendLoggingEvent() {
+    Logger logger = Logger.getLogger(this.getClass());
+    LoggingEvent event = new LoggingEvent(null, logger, 0L, AuditLevel.AUDIT, null, null);
+
+    Deencapsulation.setField(rAppender, "qw", null);
+    try {
+      rAppender.subAppend(event);
+    } catch (Exception e) {
+      //
+    }
+    Assert.assertTrue(true);
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/apache/carbondata/common/logging/impl/AuditLevelTest_UT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/carbondata/common/logging/impl/AuditLevelTest_UT.java b/common/src/test/java/org/apache/carbondata/common/logging/impl/AuditLevelTest_UT.java
new file mode 100644
index 0000000..4c285a7
--- /dev/null
+++ b/common/src/test/java/org/apache/carbondata/common/logging/impl/AuditLevelTest_UT.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging.impl;
+
+import junit.framework.TestCase;
+import org.apache.log4j.Level;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class AuditLevelTest_UT extends TestCase {
+
+  @Before public void setUp() throws Exception {
+  }
+
+  @After public void tearDown() throws Exception {
+  }
+
+  @Test public void testAuditLevel() {
+    assertEquals(AuditLevel.AUDIT.toInt(), 55000);
+  }
+
+  @Test public void testToLevelIntLevel() {
+    assertSame(AuditLevel.AUDIT, AuditLevel.toLevel(55000, Level.DEBUG));
+  }
+
+  @Test public void testToLevelStringLevel() {
+    assertSame(AuditLevel.AUDIT, AuditLevel.toLevel("AUDIT", Level.DEBUG));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/apache/carbondata/common/logging/impl/ExtendedRollingFileAppenderTest_UT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/carbondata/common/logging/impl/ExtendedRollingFileAppenderTest_UT.java b/common/src/test/java/org/apache/carbondata/common/logging/impl/ExtendedRollingFileAppenderTest_UT.java
new file mode 100644
index 0000000..006db9c
--- /dev/null
+++ b/common/src/test/java/org/apache/carbondata/common/logging/impl/ExtendedRollingFileAppenderTest_UT.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging.impl;
+
+import junit.framework.Assert;
+import mockit.Deencapsulation;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class ExtendedRollingFileAppenderTest_UT {
+
+  private ExtendedRollingFileAppender rAppender = null;
+
+  @Before public void setUp() throws Exception {
+    rAppender = new ExtendedRollingFileAppender();
+    Deencapsulation.setField(rAppender, "fileName", "dummy.log");
+    Deencapsulation.setField(rAppender, "maxBackupIndex", 1);
+    Deencapsulation.setField(rAppender, "maxFileSize", 1000L);
+  }
+
+  @After public void tearDown() throws Exception {
+  }
+
+  @Test public void testRollOver() {
+    rAppender.rollOver();
+    rAppender.rollOver();
+    rAppender.rollOver();
+    Assert.assertTrue(true);
+  }
+
+  @Test public void testCleanLogs() {
+    final String startName = "dummy";
+    final String folderPath = "./";
+    int maxBackupIndex = 1;
+
+    Deencapsulation.invoke(rAppender, "cleanLogs", startName, folderPath, maxBackupIndex);
+  }
+
+  @Test public void testSubAppendLoggingEvent() {
+    Logger logger = Logger.getLogger(this.getClass());
+    LoggingEvent event = new LoggingEvent(null, logger, 0L, AuditLevel.DEBUG, null, null);
+
+    try {
+      rAppender.subAppend(event);
+    } catch (Exception e) {
+      //
+    }
+    Assert.assertTrue(true);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/apache/carbondata/common/logging/impl/FileUtilTest_UT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/carbondata/common/logging/impl/FileUtilTest_UT.java b/common/src/test/java/org/apache/carbondata/common/logging/impl/FileUtilTest_UT.java
new file mode 100644
index 0000000..495b216
--- /dev/null
+++ b/common/src/test/java/org/apache/carbondata/common/logging/impl/FileUtilTest_UT.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging.impl;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+
+import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class FileUtilTest_UT extends TestCase {
+
+  /**
+   * @throws Exception
+   */
+  @Before public void setUp() throws Exception {
+    File f = new File("myfile.txt");
+    if (!f.exists()) {
+      f.createNewFile();
+    }
+  }
+
+  /**
+   * @throws Exception
+   */
+  @After public void tearDown() throws Exception {
+    File f = new File("myfile.txt");
+    if (f.exists()) {
+      f.delete();
+    }
+  }
+
+  @Test public void testClose() {
+    try {
+      FileInputStream in = new FileInputStream(new File("myfile.txt"));
+      FileUtil.close(in);
+      assertTrue(true);
+    } catch (FileNotFoundException e) {
+      assertTrue(false);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/apache/carbondata/common/logging/impl/StandardLogServiceTest_UT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/carbondata/common/logging/impl/StandardLogServiceTest_UT.java b/common/src/test/java/org/apache/carbondata/common/logging/impl/StandardLogServiceTest_UT.java
new file mode 100644
index 0000000..0ad4bd7
--- /dev/null
+++ b/common/src/test/java/org/apache/carbondata/common/logging/impl/StandardLogServiceTest_UT.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.common.logging.impl;
+
+import junit.framework.TestCase;
+import mockit.Mock;
+import mockit.MockUp;
+import org.apache.log4j.Category;
+import org.apache.log4j.Priority;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class StandardLogServiceTest_UT extends TestCase {
+
+  private StandardLogService logService = null;
+
+  /**
+   * @throws Exception
+   */
+  @Before public void setUp() throws Exception {
+
+    new MockUp<Category>() {
+      @SuppressWarnings("unused")
+      @Mock public boolean isDebugEnabled() {
+        return true;
+      }
+
+      @SuppressWarnings("unused")
+      @Mock public boolean isEnabledFor(Priority level) {
+        return true;
+      }
+
+      @SuppressWarnings("unused")
+      @Mock public boolean isInfoEnabled() {
+        return true;
+      }
+    };
+
+    logService = new StandardLogService(this.getClass().getName());
+  }
+
+  /**
+   * @throws Exception
+   */
+  @After public void tearDown() throws Exception {
+  }
+
+  @Test public void testStandardLogService() {
+    if (logService != null && logService instanceof StandardLogService) {
+      Assert.assertTrue(true);
+    } else {
+      Assert.assertTrue(false);
+    }
+  }
+
+  @Test public void testIsDebugEnabled() {
+    Assert.assertEquals(true, logService.isDebugEnabled());
+  }
+
+  @Test public void testIsWarnEnabled() {
+    Assert.assertEquals(true, logService.isWarnEnabled());
+  }
+
+  @Test public void testSecureLogEventObjectArray() {
+    Assert.assertTrue(true);
+  }
+
+  @Test public void testAuditLogEventObjectArray() {
+    logService.audit("testing");
+    Assert.assertTrue(true);
+  }
+
+  @Test public void testDebugLogEventObjectArray() {
+    logService.debug("testing");
+    Assert.assertTrue(true);
+  }
+
+  @Test public void testErrorLogEventObjectArray() {
+    logService.error("testing");
+    Assert.assertTrue(true);
+  }
+
+  @Test public void testErrorLogEventThrowableObjectArray() {
+    Exception exception = new Exception("test");
+    logService.error(exception);
+    Assert.assertTrue(true);
+  }
+
+  @Test public void testErrorLogEventThrowableMessage() {
+    Exception exception = new Exception("test");
+    logService.error(exception, "additional message");
+    Assert.assertTrue(true);
+  }
+
+  @Test public void testInfoLogEventObjectArray() {
+    logService.info("testing");
+    Assert.assertTrue(true);
+  }
+
+  @Test public void testIsInfoEnabled() {
+    Assert.assertEquals(true, logService.isInfoEnabled());
+  }
+
+  @Test public void testDeleteLogs() {
+    Assert.assertTrue(true);
+  }
+
+  @Test public void testFlushLogs() {
+    Assert.assertTrue(true);
+  }
+
+  @Test public void testSetEventProperties() {
+    logService.setEventProperties("CLIENT_IP", "127.0.0.1");
+    Assert.assertTrue(true);
+  }
+
+  @Test public void testIsDoLog() {
+    StandardLogService.setDoLog(true);
+    Assert.assertEquals(true, StandardLogService.isDoLog());
+
+    StandardLogService.setDoLog(false);
+    Assert.assertEquals(false, StandardLogService.isDoLog());
+  }
+
+  @Test public void testSetDoLog() {
+    StandardLogService.setDoLog(true);
+    Assert.assertEquals(true, StandardLogService.isDoLog());
+  }
+
+  @Test public void testAuditString() {
+    logService.audit("audit message");
+    Assert.assertTrue(true);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/carbondata/common/logging/LogServiceFactoryTest_UT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/carbondata/common/logging/LogServiceFactoryTest_UT.java b/common/src/test/java/org/carbondata/common/logging/LogServiceFactoryTest_UT.java
deleted file mode 100644
index f76b910..0000000
--- a/common/src/test/java/org/carbondata/common/logging/LogServiceFactoryTest_UT.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging;
-
-import org.carbondata.common.logging.impl.StandardLogService;
-
-import junit.framework.TestCase;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class LogServiceFactoryTest_UT extends TestCase {
-
-  @Before public void setUp() throws Exception {
-  }
-
-  @After public void tearDown() throws Exception {
-  }
-
-  @Test public void testGetLogService() {
-    LogService logger = LogServiceFactory.getLogService("sampleclass");
-    assertTrue(logger instanceof StandardLogService);
-  }
-
-}


[35/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
new file mode 100644
index 0000000..7acabf2
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -0,0 +1,1428 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.util;
+
+import java.io.Closeable;
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.TableBlockInfo;
+import org.apache.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
+import org.apache.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
+import org.apache.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
+import org.apache.carbondata.core.carbon.path.CarbonStorePath;
+import org.apache.carbondata.core.carbon.path.CarbonTablePath;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnGroupModel;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreDataHolder;
+import org.apache.carbondata.core.datastorage.store.columnar.UnBlockIndexer;
+import org.apache.carbondata.core.datastorage.store.compression.MeasureMetaDataModel;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFile;
+import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFileFilter;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+import org.apache.carbondata.core.keygenerator.mdkey.NumberCompressor;
+import org.apache.carbondata.core.metadata.ValueEncoderMeta;
+import org.apache.carbondata.scan.model.QueryDimension;
+
+import org.apache.commons.lang.ArrayUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.pentaho.di.core.exception.KettleException;
+
+
+public final class CarbonUtil {
+
+  public static final String HDFS_PREFIX = "hdfs://";
+  public static final String VIEWFS_PREFIX = "viewfs://";
+  private static final String FS_DEFAULT_FS = "fs.defaultFS";
+
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(CarbonUtil.class.getName());
+
+  /**
+   * EIGHT
+   */
+  private static final int CONST_EIGHT = 8;
+
+  /**
+   * SEVEN
+   */
+  private static final int CONST_SEVEN = 7;
+
+  /**
+   * HUNDRED
+   */
+  private static final int CONST_HUNDRED = 100;
+
+  private static final Configuration conf = new Configuration(true);
+
+  private CarbonUtil() {
+
+  }
+
+  /**
+   * This method closes the streams
+   *
+   * @param streams - streams to close.
+   */
+  public static void closeStreams(Closeable... streams) {
+    // Added if to avoid NullPointerException in case one stream is being passed as null
+    if (null != streams) {
+      for (Closeable stream : streams) {
+        if (null != stream) {
+          try {
+            stream.close();
+          } catch (IOException e) {
+            LOGGER.error("Error while closing stream" + stream);
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * @param baseStorePath
+   * @return
+   */
+  private static int createBaseStoreFolders(String baseStorePath) {
+    FileFactory.FileType fileType = FileFactory.getFileType(baseStorePath);
+    try {
+      if (!FileFactory.isFileExist(baseStorePath, fileType, false)) {
+        if (!FileFactory.mkdirs(baseStorePath, fileType)) {
+          return -1;
+        }
+      }
+    } catch (Exception e) {
+      return -1;
+    }
+    return 1;
+  }
+
+  /**
+   * This method checks whether the restructure folder exists and, if it does
+   * not, returns the number with which the folder needs to be created.
+   *
+   * @param baseStorePath -
+   *                      base location where the folder will be created.
+   * @return counter with which the folder will be created.
+   */
+  public static int checkAndReturnCurrentRestructFolderNumber(String baseStorePath,
+      final String filterType, final boolean isDirectory) {
+    if (null == baseStorePath || 0 == baseStorePath.length()) {
+      return -1;
+    }
+    // change backslashes to forward slashes
+    baseStorePath = baseStorePath.replace("\\", "/");
+
+    // if the path ends with / then remove it.
+    if (baseStorePath.charAt(baseStorePath.length() - 1) == '/') {
+      baseStorePath = baseStorePath.substring(0, baseStorePath.lastIndexOf("/"));
+    }
+    int retValue = createBaseStoreFolders(baseStorePath);
+    if (-1 == retValue) {
+      return retValue;
+    }
+
+    CarbonFile carbonFile =
+        FileFactory.getCarbonFile(baseStorePath, FileFactory.getFileType(baseStorePath));
+
+    // List of directories
+    CarbonFile[] listFiles = carbonFile.listFiles(new CarbonFileFilter() {
+      @Override public boolean accept(CarbonFile pathname) {
+        if (isDirectory && pathname.isDirectory()) {
+          if (pathname.getAbsolutePath().indexOf(filterType) > -1) {
+            return true;
+          }
+        } else {
+          if (pathname.getAbsolutePath().indexOf(filterType) > -1) {
+            return true;
+          }
+        }
+
+        return false;
+      }
+    });
+
+    int counter = -1;
+
+    // if no folder exists then return -1
+    if (listFiles.length == 0) {
+      return counter;
+    }
+
+    counter = findCounterValue(filterType, listFiles, counter);
+    return counter;
+  }
+
+  public static int checkAndReturnCurrentLoadFolderNumber(String baseStorePath) {
+    return checkAndReturnCurrentRestructFolderNumber(baseStorePath, "Load_", true);
+  }
+
+  /**
+   * @param filterType
+   * @param listFiles
+   * @param counter
+   * @return
+   */
+  private static int findCounterValue(final String filterType, CarbonFile[] listFiles,
+      int counter) {
+    if ("Load_".equals(filterType)) {
+      for (CarbonFile files : listFiles) {
+        String folderName = getFolderName(files);
+        if (folderName.indexOf('.') > -1) {
+          folderName = folderName.substring(0, folderName.indexOf('.'));
+        }
+        String[] split = folderName.split("_");
+
+        if (split.length > 1 && counter < Integer.parseInt(split[1])) {
+          counter = Integer.parseInt(split[1]);
+        }
+      }
+    } else {
+      // Iterate list of Directories and find the counter value
+      for (CarbonFile eachFile : listFiles) {
+        String folderName = getFolderName(eachFile);
+        String[] split = folderName.split("_");
+        if (counter < Integer.parseInt(split[1])) {
+          counter = Integer.parseInt(split[1]);
+        }
+      }
+    }
+    return counter;
+  }
+
+  /**
+   * @param eachFile
+   * @return
+   */
+  private static String getFolderName(CarbonFile eachFile) {
+    String str = eachFile.getAbsolutePath();
+    str = str.replace("\\", "/");
+    int firstFolderIndex = str.lastIndexOf("/");
+    String folderName = str.substring(firstFolderIndex);
+    return folderName;
+  }
+
+  /**
+   * This method will be used to update the dimension cardinality
+   *
+   * @param dimCardinality
+   * @return new increment cardinality
+   */
+  public static int[] getIncrementedCardinality(int[] dimCardinality) {
+    // get the cardinality incr factor
+    final int incrValue = CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL;
+
+    int perIncr = 0;
+    int remainder = 0;
+    int[] newDimsC = new int[dimCardinality.length];
+    for (int i = 0; i < dimCardinality.length; i++) {
+      // get the incr
+      perIncr = (dimCardinality[i] * incrValue) / CONST_HUNDRED;
+
+      // if the per-value increment is at least one, add it to the cardinality
+      if (perIncr > 0) {
+        newDimsC[i] = dimCardinality[i] + perIncr;
+      } else {
+        // else add one
+        newDimsC[i] = dimCardinality[i] + 1;
+      }
+      // check whether it's at the boundary condition
+      remainder = newDimsC[i] % CONST_EIGHT;
+      if (remainder == CONST_SEVEN) {
+        // then incr cardinality by 1
+        newDimsC[i] = dimCardinality[i] + 1;
+      }
+    }
+    // get the log bits of cardinality
+    for (int i = 0; i < newDimsC.length; i++) {
+      newDimsC[i] = Long.toBinaryString(newDimsC[i]).length();
+    }
+    return newDimsC;
+  }
+
+  public static int getIncrementedCardinality(int dimCardinality) {
+    // get the cardinality incr factor
+    final int incrValue = CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL;
+
+    int perIncr = 0;
+    int remainder = 0;
+    int newDimsC = 0;
+
+    // get the incr
+    perIncr = (dimCardinality * incrValue) / CONST_HUNDRED;
+
+    // if the per-value increment is at least one, add it to the cardinality
+    if (perIncr > 0) {
+      newDimsC = dimCardinality + perIncr;
+    } else {
+      // else add one
+      newDimsC = dimCardinality + 1;
+    }
+    // check whether it's at the boundary condition
+    remainder = newDimsC % CONST_EIGHT;
+    if (remainder == CONST_SEVEN) {
+      // then incr cardinality by 1
+      newDimsC = dimCardinality + 1;
+    }
+    // get the log bits of cardinality
+    newDimsC = Long.toBinaryString(newDimsC).length();
+
+    return newDimsC;
+  }
+
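The two overloads above bump the cardinality by a small percentage (or by one), fall back to cardinality + 1 when the bumped value lands on 7 mod 8, and then take the binary length. A minimal standalone sketch, assuming an increment factor of 10 (the actual CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL is defined elsewhere):

    public class CardinalityBitsSketch {
      public static void main(String[] args) {
        int incrValue = 10;                                   // assumed increment factor
        for (int card : new int[] { 100, 3 }) {
          int perIncr = (card * incrValue) / 100;
          int bumped = perIncr > 0 ? card + perIncr : card + 1;
          if (bumped % 8 == 7) {
            bumped = card + 1;                                // step off the boundary value
          }
          System.out.println(card + " -> " + Long.toBinaryString(bumped).length() + " bits");
        }
        // prints: 100 -> 7 bits, 3 -> 3 bits
      }
    }
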
+  /**
+   * Return a ColumnGroupModel; see ColumnGroupModel for details.
+   *
+   * @param columnGroups : column groups
+   * @return ColumnGroupModel  model
+   */
+  public static ColumnGroupModel getColGroupModel(int[][] columnGroups) {
+    int[] columnSplit = new int[columnGroups.length];
+    int noOfColumnStore = columnSplit.length;
+    boolean[] columnarStore = new boolean[noOfColumnStore];
+
+    for (int i = 0; i < columnGroups.length; i++) {
+      columnSplit[i] = columnGroups[i].length;
+      columnarStore[i] = columnGroups[i].length > 1 ? false : true;
+    }
+    ColumnGroupModel colGroupModel = new ColumnGroupModel();
+    colGroupModel.setNoOfColumnStore(noOfColumnStore);
+    colGroupModel.setColumnSplit(columnSplit);
+    colGroupModel.setColumnarStore(columnarStore);
+    colGroupModel.setColumnGroup(columnGroups);
+    return colGroupModel;
+  }
+
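In the grouping logic above, a single-column group stays columnar while a multi-column group becomes a row store; a minimal sketch with a hypothetical grouping:

    public class ColGroupSketch {
      public static void main(String[] args) {
        int[][] columnGroups = { { 0 }, { 1, 2 }, { 3 } };    // hypothetical grouping
        for (int[] group : columnGroups) {
          // a group of one column stays columnar; larger groups become row stores
          System.out.println(java.util.Arrays.toString(group)
              + " -> columnar=" + (group.length <= 1));
        }
        // prints: [0] -> columnar=true, [1, 2] -> columnar=false, [3] -> columnar=true
      }
    }
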
+  /**
+   * This method will be used to update the dimension cardinality
+   *
+   * @param dimCardinality
+   * @return new increment cardinality
+   */
+  public static int[] getIncrementedCardinalityFullyFilled(int[] dimCardinality) {
+    int[] newDimsC = new int[dimCardinality.length];
+    // get the log bits of cardinality
+    for (int i = 0; i < dimCardinality.length; i++) {
+      if (dimCardinality[i] == 0) {
+        //Array or struct type may have higher value
+        newDimsC[i] = 64;
+      } else {
+        int bitsLength = Long.toBinaryString(dimCardinality[i]).length();
+        int div = bitsLength / 8;
+        int mod = bitsLength % 8;
+        if (mod > 0) {
+          newDimsC[i] = 8 * (div + 1);
+        } else {
+          newDimsC[i] = bitsLength;
+        }
+      }
+    }
+    return newDimsC;
+  }
+
+  private static int getBitLengthFullyFilled(int dimlens) {
+    int bitsLength = Long.toBinaryString(dimlens).length();
+    int div = bitsLength / 8;
+    int mod = bitsLength % 8;
+    if (mod > 0) {
+      return 8 * (div + 1);
+    } else {
+      return bitsLength;
+    }
+  }
+
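The fully-filled variants round the binary length up to a whole number of bytes; a quick standalone check of that rounding:

    public class FullyFilledBitsSketch {
      public static void main(String[] args) {
        for (int card : new int[] { 1, 255, 300 }) {
          int bits = Long.toBinaryString(card).length();
          int rounded = bits % 8 > 0 ? 8 * (bits / 8 + 1) : bits;
          System.out.println(card + " -> " + rounded + " bits");
        }
        // prints: 1 -> 8, 255 -> 8, 300 -> 16 (9 bits padded to two bytes)
      }
    }
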
+  /**
+   * This method will be used to delete the folder and files
+   *
+   * @param path file path array
+   * @throws Exception exception
+   */
+  public static void deleteFoldersAndFiles(final File... path) throws CarbonUtilException {
+    try {
+      UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
+
+        @Override public Void run() throws Exception {
+          for (int i = 0; i < path.length; i++) {
+            deleteRecursive(path[i]);
+          }
+          return null;
+        }
+      });
+    } catch (IOException e) {
+      throw new CarbonUtilException("Error while deleting the folders and files");
+    } catch (InterruptedException e) {
+      throw new CarbonUtilException("Error while deleting the folders and files");
+    }
+
+  }
+
+  /**
+   * Recursively delete the files
+   *
+   * @param f File to be deleted
+   * @throws CarbonUtilException
+   */
+  private static void deleteRecursive(File f) throws CarbonUtilException {
+    if (f.isDirectory()) {
+      if (f.listFiles() != null) {
+        for (File c : f.listFiles()) {
+          deleteRecursive(c);
+        }
+      }
+    }
+    if (f.exists() && !f.delete()) {
+      throw new CarbonUtilException("Error while deleting the folders and files");
+    }
+  }
+
+  public static void deleteFoldersAndFiles(final CarbonFile... file) throws CarbonUtilException {
+    try {
+      UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
+
+        @Override public Void run() throws Exception {
+          for (int i = 0; i < file.length; i++) {
+            deleteRecursive(file[i]);
+          }
+          return null;
+        }
+      });
+    } catch (IOException e) {
+      throw new CarbonUtilException("Error while deleting the folders and files");
+    } catch (InterruptedException e) {
+      throw new CarbonUtilException("Error while deleting the folders and files");
+    }
+  }
+
+  public static String getBadLogPath(String storeLocation) {
+    String badLogStoreLocation =
+            CarbonProperties.getInstance().getProperty(CarbonCommonConstants.CARBON_BADRECORDS_LOC);
+    badLogStoreLocation = badLogStoreLocation + File.separator + storeLocation;
+
+    return badLogStoreLocation;
+  }
+
+  public static void deleteFoldersAndFilesSilent(final CarbonFile... file)
+      throws CarbonUtilException {
+    try {
+      UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
+
+        @Override public Void run() throws Exception {
+          for (int i = 0; i < file.length; i++) {
+            deleteRecursiveSilent(file[i]);
+          }
+          return null;
+        }
+      });
+    } catch (IOException e) {
+      throw new CarbonUtilException("Error while deleting the folders and files");
+    } catch (InterruptedException e) {
+      throw new CarbonUtilException("Error while deleting the folders and files");
+    }
+  }
+
+  /**
+   * This function will rename the table to be deleted
+   *
+   * @param partitionCount
+   * @param storePath
+   * @param databaseName
+   * @param tableName
+   */
+  public static void renameTableForDeletion(int partitionCount, String storePath,
+      String databaseName, String tableName) {
+    String tableNameWithPartition = "";
+    String databaseNameWithPartition = "";
+    String fullPath = "";
+    String newFilePath = "";
+    String newFileName = "";
+    Callable<Void> c = null;
+    long time = System.currentTimeMillis();
+    FileFactory.FileType fileType = null;
+    ExecutorService executorService = Executors.newFixedThreadPool(10);
+    for (int i = 0; i < partitionCount; i++) {
+      databaseNameWithPartition = databaseName + '_' + i;
+      tableNameWithPartition = tableName + '_' + i;
+      newFileName = tableNameWithPartition + '_' + time;
+      fullPath = storePath + File.separator + databaseNameWithPartition + File.separator
+          + tableNameWithPartition;
+      newFilePath =
+          storePath + File.separator + databaseNameWithPartition + File.separator + newFileName;
+      fileType = FileFactory.getFileType(fullPath);
+      try {
+        if (FileFactory.isFileExist(fullPath, fileType)) {
+          CarbonFile file = FileFactory.getCarbonFile(fullPath, fileType);
+          boolean isRenameSuccessful = file.renameTo(newFilePath);
+          if (!isRenameSuccessful) {
+            LOGGER.error("Problem renaming the table :: " + fullPath);
+            c = new DeleteFolderAndFiles(file);
+            executorService.submit(c);
+          } else {
+            c = new DeleteFolderAndFiles(FileFactory.getCarbonFile(newFilePath, fileType));
+            executorService.submit(c);
+          }
+        }
+      } catch (IOException e) {
+        LOGGER.error("Problem renaming the table :: " + fullPath);
+      }
+    }
+    executorService.shutdown();
+  }
+
+  /**
+   * Recursively delete the files
+   *
+   * @param f File to be deleted
+   * @throws CarbonUtilException
+   */
+  private static void deleteRecursive(CarbonFile f) throws CarbonUtilException {
+    if (f.isDirectory()) {
+      if (f.listFiles() != null) {
+        for (CarbonFile c : f.listFiles()) {
+          deleteRecursive(c);
+        }
+      }
+    }
+    if (f.exists() && !f.delete()) {
+      throw new CarbonUtilException("Error while deleting the folders and files");
+    }
+  }
+
+  private static void deleteRecursiveSilent(CarbonFile f) throws CarbonUtilException {
+    if (f.isDirectory()) {
+      if (f.listFiles() != null) {
+        for (CarbonFile c : f.listFiles()) {
+          deleteRecursiveSilent(c);
+        }
+      }
+    }
+    if (f.exists() && !f.delete()) {
+      return;
+    }
+  }
+
+  public static void deleteFiles(File[] intermediateFiles) throws CarbonUtilException {
+    for (int i = 0; i < intermediateFiles.length; i++) {
+      if (!intermediateFiles[i].delete()) {
+        throw new CarbonUtilException("Problem while deleting intermediate file");
+      }
+    }
+  }
+
+  public static byte[] getKeyArray(ColumnarKeyStoreDataHolder[] columnarKeyStoreDataHolder,
+      int totalKeySize, int eachKeySize) {
+    byte[] completeKeyArray = new byte[totalKeySize];
+    byte[] keyBlockData = null;
+    int destinationPosition = 0;
+    int[] columnIndex = null;
+    int blockKeySize = 0;
+    for (int i = 0; i < columnarKeyStoreDataHolder.length; i++) {
+      keyBlockData = columnarKeyStoreDataHolder[i].getKeyBlockData();
+      blockKeySize = columnarKeyStoreDataHolder[i].getColumnarKeyStoreMetadata().getEachRowSize();
+      if (columnarKeyStoreDataHolder[i].getColumnarKeyStoreMetadata().isSorted()) {
+        for (int j = 0; j < keyBlockData.length; j += blockKeySize) {
+          System.arraycopy(keyBlockData, j, completeKeyArray, destinationPosition, blockKeySize);
+          destinationPosition += eachKeySize;
+        }
+      } else {
+        columnIndex = columnarKeyStoreDataHolder[i].getColumnarKeyStoreMetadata().getColumnIndex();
+
+        for (int j = 0; j < columnIndex.length; j++) {
+          System.arraycopy(keyBlockData, columnIndex[j] * blockKeySize, completeKeyArray,
+              eachKeySize * columnIndex[j] + destinationPosition, blockKeySize);
+        }
+      }
+      destinationPosition = blockKeySize;
+    }
+    return completeKeyArray;
+  }
+
+  public static byte[] getKeyArray(ColumnarKeyStoreDataHolder[] columnarKeyStoreDataHolder,
+      int totalKeySize, int eachKeySize, short[] columnIndex) {
+    byte[] completeKeyArray = new byte[totalKeySize];
+    byte[] keyBlockData = null;
+    int destinationPosition = 0;
+    int blockKeySize = 0;
+    for (int i = 0; i < columnarKeyStoreDataHolder.length; i++) {
+      keyBlockData = columnarKeyStoreDataHolder[i].getKeyBlockData();
+      blockKeySize = columnarKeyStoreDataHolder[i].getColumnarKeyStoreMetadata().getEachRowSize();
+
+      for (int j = 0; j < columnIndex.length; j++) {
+        System.arraycopy(keyBlockData, columnIndex[j] * blockKeySize, completeKeyArray,
+            destinationPosition, blockKeySize);
+        destinationPosition += eachKeySize;
+      }
+      destinationPosition = blockKeySize;
+    }
+    return completeKeyArray;
+  }
+
+  public static int getFirstIndexUsingBinarySearch(FixedLengthDimensionDataChunk dimColumnDataChunk,
+      int low, int high, byte[] compareValue, boolean matchUpLimit) {
+    int cmpResult = 0;
+    while (high >= low) {
+      int mid = (low + high) / 2;
+      cmpResult = ByteUtil.UnsafeComparer.INSTANCE
+          .compareTo(dimColumnDataChunk.getCompleteDataChunk(), mid * compareValue.length,
+              compareValue.length, compareValue, 0, compareValue.length);
+      if (cmpResult < 0) {
+        low = mid + 1;
+      } else if (cmpResult > 0) {
+        high = mid - 1;
+      } else {
+        int currentIndex = mid;
+        if(!matchUpLimit) {
+          while (currentIndex - 1 >= 0 && ByteUtil.UnsafeComparer.INSTANCE
+              .compareTo(dimColumnDataChunk.getCompleteDataChunk(),
+                  (currentIndex - 1) * compareValue.length, compareValue.length, compareValue, 0,
+                  compareValue.length) == 0) {
+            --currentIndex;
+          }
+        } else {
+          while (currentIndex + 1 <= high && ByteUtil.UnsafeComparer.INSTANCE
+              .compareTo(dimColumnDataChunk.getCompleteDataChunk(),
+                  (currentIndex + 1) * compareValue.length, compareValue.length, compareValue, 0,
+                  compareValue.length) == 0) {
+            currentIndex++;
+          }
+        }
+        return currentIndex;
+      }
+    }
+    return -(low + 1);
+  }
+
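The search above is a standard binary search that, on a hit, walks to the first (or, when matchUpLimit is set, the last) duplicate, and on a miss returns the insertion point encoded as -(low + 1). A minimal stand-in using one-byte keys instead of a fixed-length chunk:

    public class FirstIndexSketch {
      static int firstIndex(byte[] sortedData, byte key) {
        int low = 0, high = sortedData.length - 1;
        while (high >= low) {
          int mid = (low + high) / 2;
          if (sortedData[mid] < key) {
            low = mid + 1;
          } else if (sortedData[mid] > key) {
            high = mid - 1;
          } else {
            int current = mid;
            while (current - 1 >= 0 && sortedData[current - 1] == key) {
              current--;                               // walk back to the first duplicate
            }
            return current;
          }
        }
        return -(low + 1);                             // miss: encoded insertion point
      }

      public static void main(String[] args) {
        byte[] sorted = { 1, 3, 3, 3, 7 };
        System.out.println(firstIndex(sorted, (byte) 3));   // 1, the first of the three 3s
        System.out.println(firstIndex(sorted, (byte) 5));   // -5: 5 would be inserted at index 4
      }
    }
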
+  /**
+   * Method will identify the value which is less than the pivot element
+   * on which the range filter is being applied.
+   *
+   * @param currentIndex
+   * @param dimColumnDataChunk
+   * @param compareValue
+   * @return index value
+   */
+  public static int nextLesserValueToTarget(int currentIndex,
+      FixedLengthDimensionDataChunk dimColumnDataChunk, byte[] compareValue) {
+    while (currentIndex - 1 >= 0 && ByteUtil.UnsafeComparer.INSTANCE
+        .compareTo(dimColumnDataChunk.getCompleteDataChunk(),
+            (currentIndex - 1) * compareValue.length, compareValue.length, compareValue, 0,
+            compareValue.length) >= 0) {
+      --currentIndex;
+    }
+
+    return --currentIndex;
+  }
+
+  /**
+   * Method will identify the value which is greater than the pivot element
+   * on which the range filter is being applied.
+   *
+   * @param currentIndex
+   * @param dimColumnDataChunk
+   * @param compareValue
+   * @param numberOfRows
+   * @return index value
+   */
+  public static int nextGreaterValueToTarget(int currentIndex,
+      FixedLengthDimensionDataChunk dimColumnDataChunk, byte[] compareValue, int numberOfRows) {
+    while (currentIndex + 1 < numberOfRows && ByteUtil.UnsafeComparer.INSTANCE
+        .compareTo(dimColumnDataChunk.getCompleteDataChunk(),
+            (currentIndex + 1) * compareValue.length, compareValue.length, compareValue, 0,
+            compareValue.length) <= 0) {
+      ++currentIndex;
+    }
+
+    return ++currentIndex;
+  }
+
+  public static int[] getUnCompressColumnIndex(int totalLength, byte[] columnIndexData,
+      NumberCompressor numberCompressor) {
+    ByteBuffer buffer = ByteBuffer.wrap(columnIndexData);
+    buffer.rewind();
+    int indexDataLength = buffer.getInt();
+    byte[] indexData = new byte[indexDataLength];
+    byte[] indexMap =
+        new byte[totalLength - indexDataLength - CarbonCommonConstants.INT_SIZE_IN_BYTE];
+    buffer.get(indexData);
+    buffer.get(indexMap);
+    return UnBlockIndexer.uncompressIndex(numberCompressor.unCompress(indexData),
+        numberCompressor.unCompress(indexMap));
+  }
+
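The blob decoded above is laid out as [int indexDataLength][indexData][indexMap]. A standalone round trip of that layout, with made-up bytes and the NumberCompressor step left out:

    import java.nio.ByteBuffer;

    public class ColumnIndexLayoutSketch {
      public static void main(String[] args) {
        byte[] indexData = { 1, 2, 3 };                // hypothetical compressed index bytes
        byte[] indexMap = { 9 };                       // hypothetical compressed map bytes
        ByteBuffer buffer = ByteBuffer.allocate(4 + indexData.length + indexMap.length);
        buffer.putInt(indexData.length).put(indexData).put(indexMap);

        buffer.rewind();
        byte[] data = new byte[buffer.getInt()];
        byte[] map = new byte[buffer.capacity() - data.length - 4];
        buffer.get(data).get(map);
        System.out.println(data.length + " index bytes, " + map.length + " map bytes");
        // prints: 3 index bytes, 1 map bytes
      }
    }
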
+  /**
+   * Convert int array to Integer list
+   *
+   * @param array
+   * @return List<Integer>
+   */
+  public static List<Integer> convertToIntegerList(int[] array) {
+    List<Integer> integers = new ArrayList<Integer>();
+    for (int i = 0; i < array.length; i++) {
+      integers.add(array[i]);
+    }
+    return integers;
+  }
+
+  /**
+   * Read level metadata file and return cardinality
+   *
+   * @param levelPath
+   * @return
+   * @throws CarbonUtilException
+   */
+  public static int[] getCardinalityFromLevelMetadataFile(String levelPath)
+      throws CarbonUtilException {
+    DataInputStream dataInputStream = null;
+    int[] cardinality = null;
+
+    try {
+      if (FileFactory.isFileExist(levelPath, FileFactory.getFileType(levelPath))) {
+        dataInputStream =
+            FileFactory.getDataInputStream(levelPath, FileFactory.getFileType(levelPath));
+
+        cardinality = new int[dataInputStream.readInt()];
+
+        for (int i = 0; i < cardinality.length; i++) {
+          cardinality[i] = dataInputStream.readInt();
+        }
+      }
+    } catch (FileNotFoundException e) {
+      throw new CarbonUtilException("Problem while getting the file", e);
+    } catch (IOException e) {
+      throw new CarbonUtilException("Problem while reading the file", e);
+    } finally {
+      closeStreams(dataInputStream);
+    }
+
+    return cardinality;
+  }
+
+  public static void writeLevelCardinalityFile(String loadFolderLoc, String tableName,
+      int[] dimCardinality) throws KettleException {
+    String levelCardinalityFilePath = loadFolderLoc + File.separator +
+        CarbonCommonConstants.LEVEL_METADATA_FILE + tableName
+        + CarbonCommonConstants.CARBON_METADATA_EXTENSION;
+    FileOutputStream fileOutputStream = null;
+    FileChannel channel = null;
+    try {
+      int dimCardinalityArrLength = dimCardinality.length;
+
+      // first four bytes for writing the length of array, remaining for array data
+      ByteBuffer buffer = ByteBuffer.allocate(CarbonCommonConstants.INT_SIZE_IN_BYTE
+          + dimCardinalityArrLength * CarbonCommonConstants.INT_SIZE_IN_BYTE);
+
+      fileOutputStream = new FileOutputStream(levelCardinalityFilePath);
+      channel = fileOutputStream.getChannel();
+      buffer.putInt(dimCardinalityArrLength);
+
+      for (int i = 0; i < dimCardinalityArrLength; i++) {
+        buffer.putInt(dimCardinality[i]);
+      }
+
+      buffer.flip();
+      channel.write(buffer);
+      buffer.clear();
+
+      LOGGER.info("Level cardinality file written to : " + levelCardinalityFilePath);
+    } catch (IOException e) {
+      LOGGER.error("Error while writing level cardinality file : " + levelCardinalityFilePath + e
+          .getMessage());
+      throw new KettleException("Not able to write level cardinality file", e);
+    } finally {
+      closeStreams(channel, fileOutputStream);
+    }
+  }
+
+  /**
+   * From beeline, if a delimiter is passed as \001, the code receives it as the
+   * escaped string \\001. This method unescapes the slash again and
+   * converts it back to \001.
+   *
+   * @param parseStr
+   * @return
+   */
+  public static String unescapeChar(String parseStr) {
+    switch (parseStr) {
+      case "\\001":
+        return "\001";
+      case "\\t":
+        return "\t";
+      case "\\r":
+        return "\r";
+      case "\\b":
+        return "\b";
+      case "\\f":
+        return "\f";
+      case "\\n":
+        return "\n";
+      default:
+        return parseStr;
+    }
+  }
+
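For example, the four-character string received for \\001 comes back as the single \u0001 control character:

    import org.apache.carbondata.core.util.CarbonUtil;

    public class UnescapeSketch {
      public static void main(String[] args) {
        String fromBeeline = "\\001";                  // four characters: backslash, 0, 0, 1
        String delimiter = CarbonUtil.unescapeChar(fromBeeline);
        System.out.println(delimiter.length());        // 1
        System.out.println((int) delimiter.charAt(0)); // 1, i.e. the \u0001 character
      }
    }
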
+  public static String escapeComplexDelimiterChar(String parseStr) {
+    switch (parseStr) {
+      case "$":
+        return "\\$";
+      case ":":
+        return "\\:";
+      default:
+        return parseStr;
+    }
+  }
+
+  /**
+   * Append HDFS Base Url for show create & load data sql
+   *
+   * @param filePath
+   */
+  public static String checkAndAppendHDFSUrl(String filePath) {
+    String currentPath = filePath;
+    if (null != filePath && filePath.length() != 0 &&
+        FileFactory.getFileType(filePath) != FileFactory.FileType.HDFS &&
+        FileFactory.getFileType(filePath) != FileFactory.FileType.VIEWFS) {
+      String baseDFSUrl = CarbonProperties.getInstance()
+          .getProperty(CarbonCommonConstants.CARBON_DDL_BASE_HDFS_URL);
+      if (null != baseDFSUrl) {
+        String dfsUrl = conf.get(FS_DEFAULT_FS);
+        if (null != dfsUrl && (dfsUrl.startsWith(HDFS_PREFIX) || dfsUrl
+            .startsWith(VIEWFS_PREFIX))) {
+          baseDFSUrl = dfsUrl + baseDFSUrl;
+        }
+        if (baseDFSUrl.endsWith("/")) {
+          baseDFSUrl = baseDFSUrl.substring(0, baseDFSUrl.length() - 1);
+        }
+        if (!filePath.startsWith("/")) {
+          filePath = "/" + filePath;
+        }
+        currentPath = baseDFSUrl + filePath;
+      }
+    }
+    return currentPath;
+  }
+
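A rough sketch of the resulting concatenation, with made-up values standing in for fs.defaultFS and the carbon base-URL property (the property values and paths here are assumptions for illustration only):

    public class HdfsUrlSketch {
      public static void main(String[] args) {
        String dfsUrl = "hdfs://namenode:9000";        // assumed fs.defaultFS value
        String baseDFSUrl = "/carbon/store/";          // assumed carbon base-URL value
        String filePath = "data/sample.csv";

        String base = dfsUrl + baseDFSUrl;             // defaultFS prefixes a relative base
        if (base.endsWith("/")) {
          base = base.substring(0, base.length() - 1);
        }
        if (!filePath.startsWith("/")) {
          filePath = "/" + filePath;
        }
        System.out.println(base + filePath);
        // prints: hdfs://namenode:9000/carbon/store/data/sample.csv
      }
    }
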
+  /**
+   * @param location
+   * @param factTableName
+   * @return
+   */
+  public static int getRestructureNumber(String location, String factTableName) {
+    String restructName =
+        location.substring(location.indexOf(CarbonCommonConstants.RESTRUCTRE_FOLDER));
+    int factTableIndex = restructName.indexOf(factTableName) - 1;
+    String restructNumber =
+        restructName.substring(CarbonCommonConstants.RESTRUCTRE_FOLDER.length(), factTableIndex);
+    return Integer.parseInt(restructNumber);
+  }
+
+  /**
+   * Below method will be used to get the aggregator type.
+   * CarbonCommonConstants.SUM_COUNT_VALUE_MEASURE is returned when the value is a double measure,
+   * CarbonCommonConstants.BYTE_VALUE_MEASURE when the value is a byte array.
+   *
+   * @param agg
+   * @return aggregator type
+   */
+  public static char getType(String agg) {
+    if (CarbonCommonConstants.SUM.equals(agg) || CarbonCommonConstants.COUNT.equals(agg)) {
+      return CarbonCommonConstants.SUM_COUNT_VALUE_MEASURE;
+    } else {
+      return CarbonCommonConstants.BYTE_VALUE_MEASURE;
+    }
+  }
+
+  public static String getCarbonStorePath(String databaseName, String tableName) {
+    CarbonProperties prop = CarbonProperties.getInstance();
+    if (null == prop) {
+      return null;
+    }
+    String basePath = prop.getProperty(CarbonCommonConstants.STORE_LOCATION,
+        CarbonCommonConstants.STORE_LOCATION_DEFAULT_VAL);
+    return basePath;
+  }
+
+  /**
+   * This method will check the existence of a file at a given path
+   */
+  public static boolean isFileExists(String fileName) {
+    try {
+      FileFactory.FileType fileType = FileFactory.getFileType(fileName);
+      if (FileFactory.isFileExist(fileName, fileType)) {
+        return true;
+      }
+    } catch (IOException e) {
+      LOGGER.error("@@@@@@  File not found at a given location @@@@@@ : " + fileName);
+    }
+    return false;
+  }
+
+  /**
+   * This method will check and create the given path
+   */
+  public static boolean checkAndCreateFolder(String path) {
+    boolean created = false;
+    try {
+      FileFactory.FileType fileType = FileFactory.getFileType(path);
+      if (FileFactory.isFileExist(path, fileType)) {
+        created = true;
+      } else {
+        created = FileFactory.mkdirs(path, fileType);
+      }
+    } catch (IOException e) {
+      LOGGER.error(e.getMessage());
+    }
+    return created;
+  }
+
+  /**
+   * This method will return the size of a given file
+   */
+  public static long getFileSize(String filePath) {
+    FileFactory.FileType fileType = FileFactory.getFileType(filePath);
+    CarbonFile carbonFile = FileFactory.getCarbonFile(filePath, fileType);
+    return carbonFile.getSize();
+  }
+
+  /**
+   * This method will be used to get the bit length of the dimensions based on the
+   * dimension partitioner. If the partitioner value is 1, the column
+   * cardinality will be incremented so that it fits at byte level:
+   * for example, if the number of bits required to store one column value is 3,
+   * then 8 bits will be assigned to each value of that column. In this way
+   * we may waste some bits (at most 7). If the partitioner value is more than
+   * 1, then a few columns are stored together, so the cardinality of that group
+   * will be incremented to fit at byte level. For example, if the bits required
+   * for 3 columns stored together are [1,1,1], the last value will be
+   * incremented and it will become [1,1,6].
+   *
+   * @param dimCardinality cardinality of each column
+   * @param dimPartitioner how each column is stored: if the value is 1 the column is
+   *                       stored column-wise, if it is more than 1 the column is
+   *                       grouped with other columns
+   * @return number of bits for each column
+   * @TODO for a row group only the last value is incremented, which is a problem
+   * when the last column of the group is selected most of the time in
+   * filter queries; comparisons would be more evenly distributed if the
+   * increment were spread uniformly across the group
+   */
+  public static int[] getDimensionBitLength(int[] dimCardinality, int[] dimPartitioner) {
+    int[] bitLength = new int[dimCardinality.length];
+    int dimCounter = 0;
+    for (int i = 0; i < dimPartitioner.length; i++) {
+      if (dimPartitioner[i] == 1) {
+        // for columnar store
+        // fully filled bits means complete byte or number of bits
+        // assigned will be in
+        // multiplication of 8
+        bitLength[dimCounter] = getBitLengthFullyFilled(dimCardinality[dimCounter]);
+        dimCounter++;
+      } else {
+        // for row store
+        int totalSize = 0;
+        for (int j = 0; j < dimPartitioner[i]; j++) {
+          bitLength[dimCounter] = getIncrementedCardinality(dimCardinality[dimCounter]);
+          totalSize += bitLength[dimCounter];
+          dimCounter++;
+        }
+        // below code is to increment in such a way that row group will
+        // be stored
+        // as byte level
+        int mod = totalSize % 8;
+        if (mod > 0) {
+          bitLength[dimCounter - 1] = bitLength[dimCounter - 1] + (8 - mod);
+        }
+      }
+    }
+    return bitLength;
+  }
+
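A standalone sketch of just the row-group padding step, with hypothetical per-column bit lengths as input:

    public class GroupBitsSketch {
      public static void main(String[] args) {
        int[] groupBits = { 2, 3, 2 };                 // hypothetical increments for one group
        int total = 0;
        for (int bits : groupBits) {
          total += bits;
        }
        int mod = total % 8;
        if (mod > 0) {
          groupBits[groupBits.length - 1] += 8 - mod;  // pad the last column to a byte boundary
        }
        System.out.println(java.util.Arrays.toString(groupBits));
        // prints: [2, 3, 3], i.e. 8 bits in total for the group
      }
    }
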
+  /**
+   * Below method will be used to get the value compression model of the
+   * measure data chunk
+   *
+   * @param measureDataChunkList
+   * @return value compression model
+   */
+  public static ValueCompressionModel getValueCompressionModel(
+      List<DataChunk> measureDataChunkList) {
+    Object[] maxValue = new Object[measureDataChunkList.size()];
+    Object[] minValue = new Object[measureDataChunkList.size()];
+    Object[] uniqueValue = new Object[measureDataChunkList.size()];
+    int[] decimal = new int[measureDataChunkList.size()];
+    char[] type = new char[measureDataChunkList.size()];
+    byte[] dataTypeSelected = new byte[measureDataChunkList.size()];
+
+    // fill the metadata required for the value compression model
+    for (int i = 0; i < dataTypeSelected.length; i++) {
+      int indexOf = measureDataChunkList.get(i).getEncodingList().indexOf(Encoding.DELTA);
+      if (indexOf > -1) {
+        ValueEncoderMeta valueEncoderMeta =
+            measureDataChunkList.get(i).getValueEncoderMeta().get(indexOf);
+        maxValue[i] = valueEncoderMeta.getMaxValue();
+        minValue[i] = valueEncoderMeta.getMinValue();
+        uniqueValue[i] = valueEncoderMeta.getUniqueValue();
+        decimal[i] = valueEncoderMeta.getDecimal();
+        type[i] = valueEncoderMeta.getType();
+        dataTypeSelected[i] = valueEncoderMeta.getDataTypeSelected();
+      }
+    }
+    MeasureMetaDataModel measureMetadataModel =
+        new MeasureMetaDataModel(minValue, maxValue, decimal, dataTypeSelected.length, uniqueValue,
+            type, dataTypeSelected);
+    return ValueCompressionUtil.getValueCompressionModel(measureMetadataModel);
+  }
+
+  /**
+   * Below method will be used to check whether particular encoding is present
+   * in the dimension or not
+   *
+   * @param encoding  encoding to search
+   * @return if encoding is present in dimension
+   */
+  public static boolean hasEncoding(List<Encoding> encodings, Encoding encoding) {
+    return encodings.contains(encoding);
+  }
+
+  /**
+   * below method is to check whether data type is present in the data type array
+   *
+   * @param dataType  data type to be searched
+   * @param dataTypes all data types
+   * @return if data type is present
+   */
+  public static boolean hasDataType(DataType dataType, DataType[] dataTypes) {
+    for (int i = 0; i < dataTypes.length; i++) {
+      if (dataType.equals(dataTypes[i])) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * below method is to check whether it is complex data type
+   *
+   * @param dataType  data type to be searched
+   * @return if data type is present
+   */
+  public static boolean hasComplexDataType(DataType dataType) {
+    switch (dataType) {
+      case ARRAY:
+      case STRUCT:
+      case MAP:
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  public static boolean[] getDictionaryEncodingArray(QueryDimension[] queryDimensions) {
+    boolean[] dictionaryEncodingArray = new boolean[queryDimensions.length];
+    for (int i = 0; i < queryDimensions.length; i++) {
+      dictionaryEncodingArray[i] =
+          queryDimensions[i].getDimension().hasEncoding(Encoding.DICTIONARY);
+    }
+    return dictionaryEncodingArray;
+  }
+
+  public static boolean[] getDirectDictionaryEncodingArray(QueryDimension[] queryDimensions) {
+    boolean[] dictionaryEncodingArray = new boolean[queryDimensions.length];
+    for (int i = 0; i < queryDimensions.length; i++) {
+      dictionaryEncodingArray[i] =
+          queryDimensions[i].getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY);
+    }
+    return dictionaryEncodingArray;
+  }
+
+  public static boolean[] getComplexDataTypeArray(QueryDimension[] queryDimensions) {
+    boolean[] dictionaryEncodingArray = new boolean[queryDimensions.length];
+    for (int i = 0; i < queryDimensions.length; i++) {
+      dictionaryEncodingArray[i] =
+          CarbonUtil.hasComplexDataType(queryDimensions[i].getDimension().getDataType());
+    }
+    return dictionaryEncodingArray;
+  }
+
+  /**
+   * Below method will be used to read the data file metadata
+   *
+   * @param filePath file path
+   * @param blockOffset   offset in the file
+   * @return Data file metadata instance
+   * @throws CarbonUtilException
+   */
+  public static DataFileFooter readMetadatFile(String filePath, long blockOffset, long blockLength)
+      throws CarbonUtilException {
+    DataFileFooterConverter fileFooterConverter = new DataFileFooterConverter();
+    try {
+      return fileFooterConverter.readDataFileFooter(filePath, blockOffset, blockLength);
+    } catch (IOException e) {
+      throw new CarbonUtilException("Problem while reading the file metadata", e);
+    }
+  }
+
+  /**
+   * Below method will be used to get the surrogate key
+   *
+   * @param data   actual data
+   * @param buffer byte buffer which will be used to convert the data to integer value
+   * @return surrogate key
+   */
+  public static int getSurrogateKey(byte[] data, ByteBuffer buffer) {
+    int length = 4 - data.length;
+    for (int i = 0; i < length; i++) {
+      buffer.put((byte) 0);
+    }
+    buffer.put(data);
+    buffer.rewind();
+    int surrogate = buffer.getInt();
+    buffer.clear();
+    return surrogate;
+  }
+
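A worked standalone example of the same pad-and-read: the two key bytes 0x01 and 0x2C become the surrogate 300.

    import java.nio.ByteBuffer;

    public class SurrogateKeySketch {
      public static void main(String[] args) {
        byte[] data = { 1, 44 };                       // 0x01 0x2C
        ByteBuffer buffer = ByteBuffer.allocate(4);
        // left-pad the key bytes to 4 and read them back as a big-endian int
        for (int i = 0; i < 4 - data.length; i++) {
          buffer.put((byte) 0);
        }
        buffer.put(data);
        buffer.rewind();
        System.out.println(buffer.getInt());           // 256 + 44 = 300
      }
    }
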
+  /**
+   * Thread to delete the tables
+   *
+   */
+  private static final class DeleteFolderAndFiles implements Callable<Void> {
+    private CarbonFile file;
+
+    private DeleteFolderAndFiles(CarbonFile file) {
+      this.file = file;
+    }
+
+    @Override public Void call() throws Exception {
+      deleteFoldersAndFiles(file);
+      return null;
+    }
+
+  }
+
+  /**
+   * class to sort aggregate folder list in descending order
+   */
+  private static class AggTableComparator implements Comparator<String> {
+    public int compare(String aggTable1, String aggTable2) {
+      int index1 = aggTable1.lastIndexOf(CarbonCommonConstants.UNDERSCORE);
+      int index2 = aggTable2.lastIndexOf(CarbonCommonConstants.UNDERSCORE);
+      int n1 = Integer.parseInt(aggTable1.substring(index1 + 1));
+      int n2 = Integer.parseInt(aggTable2.substring(index2 + 1));
+      if (n1 > n2) {
+        return -1;
+      } else if (n1 < n2) {
+        return 1;
+      } else {
+        return 0;
+      }
+    }
+  }
+
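The comparator sorts aggregate table names by their numeric suffix in descending order; an equivalent standalone sketch with hypothetical names:

    import java.util.Arrays;

    public class AggTableOrderSketch {
      public static void main(String[] args) {
        String[] tables = { "agg_1", "agg_10", "agg_2" };  // hypothetical table names
        Arrays.sort(tables, (a, b) -> {
          int n1 = Integer.parseInt(a.substring(a.lastIndexOf('_') + 1));
          int n2 = Integer.parseInt(b.substring(b.lastIndexOf('_') + 1));
          return Integer.compare(n2, n1);              // descending by suffix
        });
        System.out.println(Arrays.toString(tables));   // [agg_10, agg_2, agg_1]
      }
    }
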
+  /**
+   * Below method will be used to get the dimension
+   *
+   * @param tableDimensionList table dimension list
+   * @return boolean array specifying true if dimension is dictionary
+   * and false if dimension is not a dictionary column
+   */
+  public static boolean[] identifyDimensionType(List<CarbonDimension> tableDimensionList) {
+    List<Boolean> isDictionaryDimensions = new ArrayList<Boolean>();
+    Set<Integer> processedColumnGroup = new HashSet<Integer>();
+    for (CarbonDimension carbonDimension : tableDimensionList) {
+      List<CarbonDimension> childs = carbonDimension.getListOfChildDimensions();
+      //assuming complex dimensions will always be atlast
+      if(null != childs && childs.size() > 0) {
+        break;
+      }
+      if (carbonDimension.isColumnar() && hasEncoding(carbonDimension.getEncoder(),
+          Encoding.DICTIONARY)) {
+        isDictionaryDimensions.add(true);
+      } else if (!carbonDimension.isColumnar()) {
+        if (processedColumnGroup.add(carbonDimension.columnGroupId())) {
+          isDictionaryDimensions.add(true);
+        }
+      } else {
+        isDictionaryDimensions.add(false);
+      }
+    }
+    boolean[] primitive = ArrayUtils
+        .toPrimitive(isDictionaryDimensions.toArray(new Boolean[isDictionaryDimensions.size()]));
+    return primitive;
+  }
+
+  /**
+   * This method will form one single byte[] for all the high cardinality dims.
+   * First it will add the offsets of the variable length byte[]s and then the
+   * actual values.
+   *
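+   * For example (hypothetical input), two columns of 3 and 2 bytes produce the
+   * short offsets {4, 7} followed by the raw bytes, i.e. the 9 byte array
+   * [0,4, 0,7, a0,a1,a2, b0,b1].
+   *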
+   * @param byteBufferArr
+   * @return byte[] key.
+   */
+  public static byte[] packByteBufferIntoSingleByteArray(ByteBuffer[] byteBufferArr) {
+    // an empty array means there is no no-dictionary data to pack.
+    if (null == byteBufferArr || byteBufferArr.length == 0) {
+      return null;
+    }
+    int noOfCol = byteBufferArr.length;
+    short offsetLen = (short) (noOfCol * 2);
+    int totalBytes = calculateTotalBytes(byteBufferArr) + offsetLen;
+    ByteBuffer buffer = ByteBuffer.allocate(totalBytes);
+    // writing the offset of the first element.
+    buffer.putShort(offsetLen);
+
+    // prepare index for byte []
+    for (int index = 0; index < byteBufferArr.length - 1; index++) {
+      ByteBuffer individualCol = byteBufferArr[index];
+      int noOfBytes = individualCol.capacity();
+      buffer.putShort((short) (offsetLen + noOfBytes));
+      offsetLen += noOfBytes;
+      individualCol.rewind();
+    }
+
+    // put actual data.
+    for (int index = 0; index < byteBufferArr.length; index++) {
+      ByteBuffer individualCol = byteBufferArr[index];
+      buffer.put(individualCol.array());
+    }
+
+    buffer.rewind();
+    return buffer.array();
+
+  }
+
+  /**
+   * To calculate the total bytes in byte Buffer[].
+   *
+   * @param byteBufferArr
+   * @return
+   */
+  private static int calculateTotalBytes(ByteBuffer[] byteBufferArr) {
+    int total = 0;
+    for (int index = 0; index < byteBufferArr.length; index++) {
+      total += byteBufferArr[index].capacity();
+    }
+    return total;
+  }
+
+  /**
+   * Find the dimension from the metadata by its unique name. For now the
+   * level name is used as the unique name, so the user must give each level
+   * a unique name and refer to it by that name in the query.
+   *
+   * @param dimensions
+   * @param carbonDim
+   * @return
+   */
+  public static CarbonDimension findDimension(List<CarbonDimension> dimensions, String carbonDim) {
+    CarbonDimension findDim = null;
+    for (CarbonDimension dimension : dimensions) {
+      if (dimension.getColName().equalsIgnoreCase(carbonDim)) {
+        findDim = dimension;
+        break;
+      }
+    }
+    return findDim;
+  }
+
+  /**
+   * This method will be used to clear the dictionary cache after its usage is complete,
+   * so that it can be evicted from the LRU cache when the memory threshold is reached
+   *
+   * @param dictionary
+   */
+  public static void clearDictionaryCache(Dictionary dictionary) {
+    if (null != dictionary) {
+      dictionary.clear();
+    }
+  }
+
+  /**
+   * convert from wrapper to external data type
+   *
+   * @param dataType
+   * @return
+   */
+  public static org.apache.carbondata.format.DataType fromWrapperToExternalDataType(
+      DataType dataType) {
+
+    if (null == dataType) {
+      return null;
+    }
+    switch (dataType) {
+      case STRING:
+        return org.apache.carbondata.format.DataType.STRING;
+      case INT:
+        return org.apache.carbondata.format.DataType.INT;
+      case LONG:
+        return org.apache.carbondata.format.DataType.LONG;
+      case DOUBLE:
+        return org.apache.carbondata.format.DataType.DOUBLE;
+      case DECIMAL:
+        return org.apache.carbondata.format.DataType.DECIMAL;
+      case TIMESTAMP:
+        return org.apache.carbondata.format.DataType.TIMESTAMP;
+      case ARRAY:
+        return org.apache.carbondata.format.DataType.ARRAY;
+      case STRUCT:
+        return org.apache.carbondata.format.DataType.STRUCT;
+      default:
+        return org.apache.carbondata.format.DataType.STRING;
+    }
+  }
+
+  /**
+   * convert from external to wrapper data type
+   *
+   * @param dataType
+   * @return
+   */
+  public static DataType fromExternalToWrapperDataType(
+      org.apache.carbondata.format.DataType dataType) {
+    if (null == dataType) {
+      return null;
+    }
+    switch (dataType) {
+      case STRING:
+        return DataType.STRING;
+      case INT:
+        return DataType.INT;
+      case LONG:
+        return DataType.LONG;
+      case DOUBLE:
+        return DataType.DOUBLE;
+      case DECIMAL:
+        return DataType.DECIMAL;
+      case TIMESTAMP:
+        return DataType.TIMESTAMP;
+      case ARRAY:
+        return DataType.ARRAY;
+      case STRUCT:
+        return DataType.STRUCT;
+      default:
+        return DataType.STRING;
+    }
+  }
+
+  /**
+   * @param dictionaryColumnCardinality
+   * @param wrapperColumnSchemaList
+   * @return the formatted cardinality, with a -1 entry for every no-dictionary dimension
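+   * (e.g., for a hypothetical schema {dictDim, noDictDim, measure} with
+   * dictionaryColumnCardinality = {100}, the result is {100, -1}; measures
+   * are skipped)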
+   */
+  public static int[] getFormattedCardinality(int[] dictionaryColumnCardinality,
+      List<ColumnSchema> wrapperColumnSchemaList) {
+    List<Integer> cardinality = new ArrayList<>();
+    int counter = 0;
+    for (int i = 0; i < wrapperColumnSchemaList.size(); i++) {
+      if (CarbonUtil.hasEncoding(wrapperColumnSchemaList.get(i).getEncodingList(),
+          org.apache.carbondata.core.carbon.metadata.encoder.Encoding.DICTIONARY)) {
+        cardinality.add(dictionaryColumnCardinality[counter]);
+        counter++;
+      } else if (!wrapperColumnSchemaList.get(i).isDimensionColumn()) {
+        continue;
+      } else {
+        cardinality.add(-1);
+      }
+    }
+    return ArrayUtils.toPrimitive(cardinality.toArray(new Integer[cardinality.size()]));
+  }
+
+  public static List<ColumnSchema> getColumnSchemaList(List<CarbonDimension> carbonDimensionsList,
+      List<CarbonMeasure> carbonMeasureList) {
+    List<ColumnSchema> wrapperColumnSchemaList = new ArrayList<ColumnSchema>();
+    fillColumnSchemaListForComplexDims(carbonDimensionsList, wrapperColumnSchemaList);
+    for (CarbonMeasure carbonMeasure : carbonMeasureList) {
+      wrapperColumnSchemaList.add(carbonMeasure.getColumnSchema());
+    }
+    return wrapperColumnSchemaList;
+  }
+
+  private static void fillColumnSchemaListForComplexDims(
+      List<CarbonDimension> carbonDimensionsList, List<ColumnSchema> wrapperColumnSchemaList) {
+    for (CarbonDimension carbonDimension : carbonDimensionsList) {
+      wrapperColumnSchemaList.add(carbonDimension.getColumnSchema());
+      List<CarbonDimension> childDims = carbonDimension.getListOfChildDimensions();
+      if (null != childDims && childDims.size() > 0) {
+        fillColumnSchemaListForComplexDims(childDims, wrapperColumnSchemaList);
+      }
+    }
+  }
+
+  /**
+   * Below method will be used to get all the block index info from index file
+   *
+   * @param taskId                  task id of the file
+   * @param tableBlockInfoList      list of table block
+   * @param absoluteTableIdentifier absolute table identifier
+   * @return list of block info
+   * @throws CarbonUtilException if any problem while reading
+   */
+  public static List<DataFileFooter> readCarbonIndexFile(String taskId,
+      List<TableBlockInfo> tableBlockInfoList, AbsoluteTableIdentifier absoluteTableIdentifier)
+      throws CarbonUtilException {
+    // need to sort the block info list for the task in ascending order so
+    // it stays in sync with the block index read from the file
+    Collections.sort(tableBlockInfoList);
+    CarbonTablePath carbonTablePath = CarbonStorePath
+        .getCarbonTablePath(absoluteTableIdentifier.getStorePath(),
+            absoluteTableIdentifier.getCarbonTableIdentifier());
+    // getting the index file path
+    //TODO need to pass proper partition number when partition is supported
+    String carbonIndexFilePath = carbonTablePath
+        .getCarbonIndexFilePath(taskId, "0", tableBlockInfoList.get(0).getSegmentId());
+    DataFileFooterConverter fileFooterConverter = new DataFileFooterConverter();
+    try {
+      // read the index info and return
+      return fileFooterConverter.getIndexInfo(carbonIndexFilePath, tableBlockInfoList);
+    } catch (IOException e) {
+      throw new CarbonUtilException("Problem while reading the file metadata", e);
+    }
+  }
+
+}
+

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/CarbonUtilException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtilException.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtilException.java
new file mode 100644
index 0000000..bdeed7f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtilException.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.util;
+
+import java.util.Locale;
+
+public class CarbonUtilException extends Exception {
+
+  /**
+   * default serial version ID.
+   */
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * The Error message.
+   */
+  private String msg = "";
+
+  /**
+   * Constructor
+   *
+   * @param msg The error message for this exception.
+   */
+  public CarbonUtilException(String msg) {
+    super(msg);
+    this.msg = msg;
+  }
+
+  /**
+   * Constructor
+   *
+   * @param msg The error message for this exception.
+   */
+  public CarbonUtilException(String msg, Throwable t) {
+    super(msg, t);
+    this.msg = msg;
+  }
+
+  /**
+   * This method is used to get the localized message.
+   *
+   * @param locale - A Locale object represents a specific geographical,
+   *               political, or cultural region.
+   * @return - Localized error message (currently not implemented; returns an empty string).
+   */
+  public String getLocalizedMessage(Locale locale) {
+    return "";
+  }
+
+  /**
+   * getLocalizedMessage
+   */
+  @Override public String getLocalizedMessage() {
+    return super.getLocalizedMessage();
+  }
+
+  /**
+   * getMessage
+   */
+  public String getMessage() {
+    return this.msg;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/DataFileFooterConverter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/DataFileFooterConverter.java b/core/src/main/java/org/apache/carbondata/core/util/DataFileFooterConverter.java
new file mode 100644
index 0000000..3a1da8c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/DataFileFooterConverter.java
@@ -0,0 +1,475 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.util;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.datastore.block.TableBlockInfo;
+import org.apache.carbondata.core.carbon.metadata.blocklet.BlockletInfo;
+import org.apache.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
+import org.apache.carbondata.core.carbon.metadata.blocklet.SegmentInfo;
+import org.apache.carbondata.core.carbon.metadata.blocklet.compressor.ChunkCompressorMeta;
+import org.apache.carbondata.core.carbon.metadata.blocklet.compressor.CompressionCodec;
+import org.apache.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
+import org.apache.carbondata.core.carbon.metadata.blocklet.datachunk.PresenceMeta;
+import org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletBTreeIndex;
+import org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletIndex;
+import org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletMinMaxIndex;
+import org.apache.carbondata.core.carbon.metadata.blocklet.sort.SortState;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+import org.apache.carbondata.core.metadata.ValueEncoderMeta;
+import org.apache.carbondata.core.reader.CarbonFooterReader;
+import org.apache.carbondata.core.reader.CarbonIndexFileReader;
+import org.apache.carbondata.format.BlockIndex;
+import org.apache.carbondata.format.FileFooter;
+
+/**
+ * Below class will be used to convert the thrift object of the data file
+ * metadata to a wrapper object.
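+ * It covers both the carbon index file (getIndexInfo) and the footer stored
+ * at the end of each data file (readDataFileFooter).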
+ */
+public class DataFileFooterConverter {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(DataFileFooterConverter.class.getName());
+
+  /**
+   * Below method will be used to get the index info from index file
+   *
+   * @param filePath           file path of the index file
+   * @param tableBlockInfoList table block index
+   * @return list of index info
+   * @throws IOException problem while reading the index file
+   */
+  public List<DataFileFooter> getIndexInfo(String filePath, List<TableBlockInfo> tableBlockInfoList)
+      throws IOException {
+    CarbonIndexFileReader indexReader = new CarbonIndexFileReader();
+    List<DataFileFooter> dataFileFooters = new ArrayList<DataFileFooter>();
+    try {
+      // open the reader
+      indexReader.openThriftReader(filePath);
+      // get the index header
+      org.apache.carbondata.format.IndexHeader readIndexHeader = indexReader.readIndexHeader();
+      List<ColumnSchema> columnSchemaList = new ArrayList<ColumnSchema>();
+      List<org.apache.carbondata.format.ColumnSchema> table_columns =
+          readIndexHeader.getTable_columns();
+      for (int i = 0; i < table_columns.size(); i++) {
+        columnSchemaList.add(thriftColumnSchemaToWrapperColumnSchema(table_columns.get(i)));
+      }
+      // get the segment info
+      SegmentInfo segmentInfo = getSegmentInfo(readIndexHeader.getSegment_info());
+      BlockletIndex blockletIndex = null;
+      int counter = 0;
+      DataFileFooter dataFileFooter = null;
+      // read the block info from file
+      while (indexReader.hasNext()) {
+        BlockIndex readBlockIndexInfo = indexReader.readBlockIndexInfo();
+        blockletIndex = getBlockletIndex(readBlockIndexInfo.getBlock_index());
+        dataFileFooter = new DataFileFooter();
+        dataFileFooter.setBlockletIndex(blockletIndex);
+        dataFileFooter.setColumnInTable(columnSchemaList);
+        dataFileFooter.setNumberOfRows(readBlockIndexInfo.getNum_rows());
+        dataFileFooter.setTableBlockInfo(tableBlockInfoList.get(counter++));
+        dataFileFooter.setSegmentInfo(segmentInfo);
+        dataFileFooters.add(dataFileFooter);
+      }
+    } finally {
+      indexReader.closeThriftReader();
+    }
+    return dataFileFooters;
+  }
+
+  /**
+   * Below method will be used to convert thrift file meta to wrapper file meta
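+   *
+   * The last 8 bytes of the data block store the absolute offset of the thrift
+   * FileFooter, so the reader first reads that long and then deserializes the
+   * footer from that offset.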
+   */
+  public DataFileFooter readDataFileFooter(String filePath, long blockOffset, long blockLength)
+      throws IOException {
+    DataFileFooter dataFileFooter = new DataFileFooter();
+    FileHolder fileReader = null;
+    try {
+      long completeBlockLength = blockOffset + blockLength;
+      long footerPointer = completeBlockLength - 8;
+      fileReader = FileFactory.getFileHolder(FileFactory.getFileType(filePath));
+      long actualFooterOffset = fileReader.readLong(filePath, footerPointer);
+      CarbonFooterReader reader = new CarbonFooterReader(filePath, actualFooterOffset);
+      FileFooter footer = reader.readFooter();
+      dataFileFooter.setVersionId(footer.getVersion());
+      dataFileFooter.setNumberOfRows(footer.getNum_rows());
+      dataFileFooter.setSegmentInfo(getSegmentInfo(footer.getSegment_info()));
+      List<ColumnSchema> columnSchemaList = new ArrayList<ColumnSchema>();
+      List<org.apache.carbondata.format.ColumnSchema> table_columns = footer.getTable_columns();
+      for (int i = 0; i < table_columns.size(); i++) {
+        columnSchemaList.add(thriftColumnSchemaToWrapperColumnSchema(table_columns.get(i)));
+      }
+      dataFileFooter.setColumnInTable(columnSchemaList);
+
+      List<org.apache.carbondata.format.BlockletIndex> leaf_node_indices_Thrift =
+          footer.getBlocklet_index_list();
+      List<BlockletIndex> blockletIndexList = new ArrayList<BlockletIndex>();
+      for (int i = 0; i < leaf_node_indices_Thrift.size(); i++) {
+        BlockletIndex blockletIndex = getBlockletIndex(leaf_node_indices_Thrift.get(i));
+        blockletIndexList.add(blockletIndex);
+      }
+
+      List<org.apache.carbondata.format.BlockletInfo> leaf_node_infos_Thrift =
+          footer.getBlocklet_info_list();
+      List<BlockletInfo> blockletInfoList = new ArrayList<BlockletInfo>();
+      for (int i = 0; i < leaf_node_infos_Thrift.size(); i++) {
+        BlockletInfo blockletInfo = getBlockletInfo(leaf_node_infos_Thrift.get(i));
+        blockletInfo.setBlockletIndex(blockletIndexList.get(i));
+        blockletInfoList.add(blockletInfo);
+      }
+      dataFileFooter.setBlockletList(blockletInfoList);
+      dataFileFooter.setBlockletIndex(getBlockletIndexForDataFileFooter(blockletIndexList));
+    } finally {
+      if (null != fileReader) {
+        fileReader.finish();
+      }
+    }
+    return dataFileFooter;
+  }
+
+  /**
+   * Below method will be used to get blocklet index for data file meta
+   *
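+   * The merged index takes the start key of the first blocklet, the end key
+   * of the last blocklet, and the column-wise minimum of the min values and
+   * maximum of the max values across all blocklets.
+   *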
+   * @param blockletIndexList
+   * @return blocklet index
+   */
+  private BlockletIndex getBlockletIndexForDataFileFooter(List<BlockletIndex> blockletIndexList) {
+    BlockletIndex blockletIndex = new BlockletIndex();
+    BlockletBTreeIndex blockletBTreeIndex = new BlockletBTreeIndex();
+    blockletBTreeIndex.setStartKey(blockletIndexList.get(0).getBtreeIndex().getStartKey());
+    blockletBTreeIndex
+        .setEndKey(blockletIndexList.get(blockletIndexList.size() - 1).getBtreeIndex().getEndKey());
+    blockletIndex.setBtreeIndex(blockletBTreeIndex);
+    byte[][] currentMinValue = blockletIndexList.get(0).getMinMaxIndex().getMinValues().clone();
+    byte[][] currentMaxValue = blockletIndexList.get(0).getMinMaxIndex().getMaxValues().clone();
+    byte[][] minValue = null;
+    byte[][] maxValue = null;
+    for (int i = 1; i < blockletIndexList.size(); i++) {
+      minValue = blockletIndexList.get(i).getMinMaxIndex().getMinValues();
+      maxValue = blockletIndexList.get(i).getMinMaxIndex().getMaxValues();
+      for (int j = 0; j < maxValue.length; j++) {
+        if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(currentMinValue[j], minValue[j]) > 0) {
+          currentMinValue[j] = minValue[j].clone();
+        }
+        if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(currentMaxValue[j], maxValue[j]) < 0) {
+          currentMaxValue[j] = maxValue[j].clone();
+        }
+      }
+    }
+
+    BlockletMinMaxIndex minMax = new BlockletMinMaxIndex();
+    minMax.setMaxValues(currentMaxValue);
+    minMax.setMinValues(currentMinValue);
+    blockletIndex.setMinMaxIndex(minMax);
+    return blockletIndex;
+  }
+
+  private ColumnSchema thriftColumnSchemaToWrapperColumnSchema(
+      org.apache.carbondata.format.ColumnSchema externalColumnSchema) {
+    ColumnSchema wrapperColumnSchema = new ColumnSchema();
+    wrapperColumnSchema.setColumnUniqueId(externalColumnSchema.getColumn_id());
+    wrapperColumnSchema.setColumnName(externalColumnSchema.getColumn_name());
+    wrapperColumnSchema.setColumnar(externalColumnSchema.isColumnar());
+    wrapperColumnSchema
+        .setDataType(thriftDataTypeToWrapperDataType(externalColumnSchema.data_type));
+    wrapperColumnSchema.setDimensionColumn(externalColumnSchema.isDimension());
+    List<Encoding> encoders = new ArrayList<Encoding>();
+    for (org.apache.carbondata.format.Encoding encoder : externalColumnSchema.getEncoders()) {
+      encoders.add(fromExternalToWrapperEncoding(encoder));
+    }
+    wrapperColumnSchema.setEncodingList(encoders);
+    wrapperColumnSchema.setNumberOfChild(externalColumnSchema.getNum_child());
+    wrapperColumnSchema.setPrecision(externalColumnSchema.getPrecision());
+    wrapperColumnSchema.setColumnGroup(externalColumnSchema.getColumn_group_id());
+    wrapperColumnSchema.setScale(externalColumnSchema.getScale());
+    wrapperColumnSchema.setDefaultValue(externalColumnSchema.getDefault_value());
+    wrapperColumnSchema.setAggregateFunction(externalColumnSchema.getAggregate_function());
+    return wrapperColumnSchema;
+  }
+
+  /**
+   * Below method converts the thrift blocklet info to the wrapper
+   * blocklet info
+   *
+   * @param blockletInfoThrift blocklet info of the thrift
+   * @return blocklet info wrapper
+   */
+  private BlockletInfo getBlockletInfo(
+      org.apache.carbondata.format.BlockletInfo blockletInfoThrift) {
+    BlockletInfo blockletInfo = new BlockletInfo();
+    List<DataChunk> dimensionColumnChunk = new ArrayList<DataChunk>();
+    List<DataChunk> measureChunk = new ArrayList<DataChunk>();
+    Iterator<org.apache.carbondata.format.DataChunk> column_data_chunksIterator =
+        blockletInfoThrift.getColumn_data_chunksIterator();
+    if (null != column_data_chunksIterator) {
+      while (column_data_chunksIterator.hasNext()) {
+        org.apache.carbondata.format.DataChunk next = column_data_chunksIterator.next();
+        if (next.isRowMajor()) {
+          dimensionColumnChunk.add(getDataChunk(next, false));
+        } else if (next.getEncoders().contains(org.apache.carbondata.format.Encoding.DELTA)) {
+          measureChunk.add(getDataChunk(next, true));
+        } else {
+          dimensionColumnChunk.add(getDataChunk(next, false));
+        }
+      }
+    }
+    blockletInfo.setDimensionColumnChunk(dimensionColumnChunk);
+    blockletInfo.setMeasureColumnChunk(measureChunk);
+    blockletInfo.setNumberOfRows(blockletInfoThrift.getNum_rows());
+    return blockletInfo;
+  }
+
+  /**
+   * Below method converts the thrift encoding to the wrapper encoding
+   *
+   * @param encoderThrift thrift encoding
+   * @return wrapper encoding
+   */
+  private Encoding fromExternalToWrapperEncoding(
+      org.apache.carbondata.format.Encoding encoderThrift) {
+    switch (encoderThrift) {
+      case DICTIONARY:
+        return Encoding.DICTIONARY;
+      case DELTA:
+        return Encoding.DELTA;
+      case RLE:
+        return Encoding.RLE;
+      case INVERTED_INDEX:
+        return Encoding.INVERTED_INDEX;
+      case BIT_PACKED:
+        return Encoding.BIT_PACKED;
+      case DIRECT_DICTIONARY:
+        return Encoding.DIRECT_DICTIONARY;
+      default:
+        return Encoding.DICTIONARY;
+    }
+  }
+
+  /**
+   * Below method will be used to convert the thrift compression to wrapper
+   * compression codec
+   *
+   * @param compressionCodecThrift
+   * @return wrapper compression codec
+   */
+  private CompressionCodec getCompressionCodec(
+      org.apache.carbondata.format.CompressionCodec compressionCodecThrift) {
+    switch (compressionCodecThrift) {
+      case SNAPPY:
+        return CompressionCodec.SNAPPY;
+      default:
+        return CompressionCodec.SNAPPY;
+    }
+  }
+
+  /**
+   * Below method will be used to convert thrift segment object to wrapper
+   * segment object
+   *
+   * @param segmentInfo thrift segment info object
+   * @return wrapper segment info object
+   */
+  private SegmentInfo getSegmentInfo(org.apache.carbondata.format.SegmentInfo segmentInfo) {
+    SegmentInfo info = new SegmentInfo();
+    int[] cardinality = new int[segmentInfo.getColumn_cardinalities().size()];
+    for (int i = 0; i < cardinality.length; i++) {
+      cardinality[i] = segmentInfo.getColumn_cardinalities().get(i);
+    }
+    info.setColumnCardinality(cardinality);
+    info.setNumberOfColumns(segmentInfo.getNum_cols());
+    return info;
+  }
+
+  /**
+   * Below method will be used to convert the blocklet index of thrift to
+   * wrapper
+   *
+   * @param blockletIndexThrift
+   * @return blocklet index wrapper
+   */
+  private BlockletIndex getBlockletIndex(
+      org.apache.carbondata.format.BlockletIndex blockletIndexThrift) {
+    org.apache.carbondata.format.BlockletBTreeIndex btreeIndex =
+        blockletIndexThrift.getB_tree_index();
+    org.apache.carbondata.format.BlockletMinMaxIndex minMaxIndex =
+        blockletIndexThrift.getMin_max_index();
+    return new BlockletIndex(
+        new BlockletBTreeIndex(btreeIndex.getStart_key(), btreeIndex.getEnd_key()),
+        new BlockletMinMaxIndex(minMaxIndex.getMin_values(), minMaxIndex.getMax_values()));
+  }
+
+  /**
+   * Below method will be used to convert the thrift compression meta to
+   * wrapper chunk compression meta
+   *
+   * @param chunkCompressionMetaThrift
+   * @return chunkCompressionMetaWrapper
+   */
+  private ChunkCompressorMeta getChunkCompressionMeta(
+      org.apache.carbondata.format.ChunkCompressionMeta chunkCompressionMetaThrift) {
+    ChunkCompressorMeta compressorMeta = new ChunkCompressorMeta();
+    compressorMeta
+        .setCompressor(getCompressionCodec(chunkCompressionMetaThrift.getCompression_codec()));
+    compressorMeta.setCompressedSize(chunkCompressionMetaThrift.getTotal_compressed_size());
+    compressorMeta.setUncompressedSize(chunkCompressionMetaThrift.getTotal_uncompressed_size());
+    return compressorMeta;
+  }
+
+  /**
+   * Below method will be used to convert the thrift data type to wrapper data
+   * type
+   *
+   * @param dataTypeThrift
+   * @return dataType wrapper
+   */
+  private DataType thriftDataTypeToWrapperDataType(
+      org.apache.carbondata.format.DataType dataTypeThrift) {
+    switch (dataTypeThrift) {
+      case STRING:
+        return DataType.STRING;
+      case SHORT:
+        return DataType.SHORT;
+      case INT:
+        return DataType.INT;
+      case LONG:
+        return DataType.LONG;
+      case DOUBLE:
+        return DataType.DOUBLE;
+      case DECIMAL:
+        return DataType.DECIMAL;
+      case TIMESTAMP:
+        return DataType.TIMESTAMP;
+      case ARRAY:
+        return DataType.ARRAY;
+      case STRUCT:
+        return DataType.STRUCT;
+      default:
+        return DataType.STRING;
+    }
+  }
+
+  /**
+   * Below method will be used to convert the thrift presence meta to wrapper
+   * presence meta
+   *
+   * @param presentMetadataThrift
+   * @return wrapper presence meta
+   */
+  private PresenceMeta getPresenceMeta(
+      org.apache.carbondata.format.PresenceMeta presentMetadataThrift) {
+    PresenceMeta presenceMeta = new PresenceMeta();
+    presenceMeta.setRepresentNullValues(presentMetadataThrift.isRepresents_presence());
+    presenceMeta.setBitSet(BitSet.valueOf(presentMetadataThrift.getPresent_bit_stream()));
+    return presenceMeta;
+  }
+
+  /**
+   * Below method will be used to convert the thrift object to wrapper object
+   *
+   * @param sortStateThrift
+   * @return wrapper sort state object
+   */
+  private SortState getSortState(org.apache.carbondata.format.SortState sortStateThrift) {
+    if (sortStateThrift == org.apache.carbondata.format.SortState.SORT_EXPLICIT) {
+      return SortState.SORT_EXPLICT;
+    } else if (sortStateThrift == org.apache.carbondata.format.SortState.SORT_NATIVE) {
+      return SortState.SORT_NATIVE;
+    } else {
+      return SortState.SORT_NONE;
+    }
+  }
+
+  /**
+   * Below method will be used to convert the thrift data chunk to wrapper
+   * data chunk
+   *
+   * @param datachunkThrift
+   * @return wrapper data chunk
+   */
+  private DataChunk getDataChunk(org.apache.carbondata.format.DataChunk datachunkThrift,
+      boolean isPresenceMetaPresent) {
+    DataChunk dataChunk = new DataChunk();
+    dataChunk.setColumnUniqueIdList(datachunkThrift.getColumn_ids());
+    dataChunk.setDataPageLength(datachunkThrift.getData_page_length());
+    dataChunk.setDataPageOffset(datachunkThrift.getData_page_offset());
+    if (isPresenceMetaPresent) {
+      dataChunk.setNullValueIndexForColumn(getPresenceMeta(datachunkThrift.getPresence()));
+    }
+    dataChunk.setRlePageLength(datachunkThrift.getRle_page_length());
+    dataChunk.setRlePageOffset(datachunkThrift.getRle_page_offset());
+    dataChunk.setRowMajor(datachunkThrift.isRowMajor());
+    dataChunk.setRowIdPageLength(datachunkThrift.getRowid_page_length());
+    dataChunk.setRowIdPageOffset(datachunkThrift.getRowid_page_offset());
+    dataChunk.setSortState(getSortState(datachunkThrift.getSort_state()));
+    dataChunk.setChunkCompressionMeta(getChunkCompressionMeta(datachunkThrift.getChunk_meta()));
+    List<Encoding> encodingList = new ArrayList<Encoding>(datachunkThrift.getEncoders().size());
+    for (int i = 0; i < datachunkThrift.getEncoders().size(); i++) {
+      encodingList.add(fromExternalToWrapperEncoding(datachunkThrift.getEncoders().get(i)));
+    }
+    dataChunk.setEncoderList(encodingList);
+    if (encodingList.contains(Encoding.DELTA)) {
+      List<ByteBuffer> thriftEncoderMeta = datachunkThrift.getEncoder_meta();
+      List<ValueEncoderMeta> encodeMetaList =
+          new ArrayList<ValueEncoderMeta>(thriftEncoderMeta.size());
+      for (int i = 0; i < thriftEncoderMeta.size(); i++) {
+        encodeMetaList.add(deserializeEncoderMeta(thriftEncoderMeta.get(i).array()));
+      }
+      dataChunk.setValueEncoderMeta(encodeMetaList);
+    }
+    return dataChunk;
+  }
+
+  /**
+   * Below method will be used to convert the encode metadata to
+   * ValueEncoderMeta object
+   *
+   * @param encoderMeta
+   * @return ValueEncoderMeta object
+   */
+  private ValueEncoderMeta deserializeEncoderMeta(byte[] encoderMeta) {
+    // TODO : should remove the unnecessary fields.
+    ByteArrayInputStream aos = null;
+    ObjectInputStream objStream = null;
+    ValueEncoderMeta meta = null;
+    try {
+      aos = new ByteArrayInputStream(encoderMeta);
+      objStream = new ObjectInputStream(aos);
+      meta = (ValueEncoderMeta) objStream.readObject();
+    } catch (ClassNotFoundException e) {
+      LOGGER.error(e);
+    } catch (IOException e) {
+      LOGGER.error(e);
+    } finally {
+      // close the stream on both the success and the failure paths
+      CarbonUtil.closeStreams(objStream);
+    }
+    return meta;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java b/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
new file mode 100644
index 0000000..a821fb0
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
@@ -0,0 +1,410 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.util;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.math.RoundingMode;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+import org.apache.commons.lang.NumberUtils;
+import org.apache.spark.unsafe.types.UTF8String;
+
+public final class DataTypeUtil {
+
+  /**
+   * LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(DataTypeUtil.class.getName());
+
+  /**
+   * This method will convert a given value to its specific type
+   *
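+   * Note: for INT the string is parsed as a double and truncated to a long,
+   * so a hypothetical input of "3.7" yields 3.
+   *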
+   * @param msrValue
+   * @param dataType
+   * @param carbonMeasure
+   * @return
+   */
+  public static Object getMeasureValueBasedOnDataType(String msrValue, DataType dataType,
+      CarbonMeasure carbonMeasure) {
+    switch (dataType) {
+      case DECIMAL:
+        BigDecimal bigDecimal =
+            new BigDecimal(msrValue).setScale(carbonMeasure.getScale(), RoundingMode.HALF_UP);
+        return normalizeDecimalValue(bigDecimal, carbonMeasure.getPrecision());
+      case INT:
+        return Double.valueOf(msrValue).longValue();
+      case LONG:
+        return Long.valueOf(msrValue);
+      default:
+        return Double.valueOf(msrValue);
+    }
+  }
+
+  /**
+   * This method will check the digits before dot with the max precision allowed
+   *
+   * @param bigDecimal
+   * @param allowedPrecision precision configured by the user
+   * @return
+   */
+  private static BigDecimal normalizeDecimalValue(BigDecimal bigDecimal, int allowedPrecision) {
+    if (bigDecimal.precision() > allowedPrecision) {
+      return null;
+    }
+    return bigDecimal;
+  }
+
+  /**
+   * This method will return the type of measure based on its data type
+   *
+   * @param dataType
+   * @return
+   */
+  public static char getAggType(DataType dataType) {
+    switch (dataType) {
+      case DECIMAL:
+        return CarbonCommonConstants.BIG_DECIMAL_MEASURE;
+      case INT:
+      case LONG:
+        return CarbonCommonConstants.BIG_INT_MEASURE;
+      default:
+        return CarbonCommonConstants.SUM_COUNT_VALUE_MEASURE;
+    }
+  }
+
+  /**
+   * This method will convert a big decimal value to bytes
+   *
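+   * The first byte holds the scale and the remaining bytes the unscaled value,
+   * e.g. (hypothetical input) 12.34 becomes {0x02, 0x04, 0xD2} since 1234 = 0x04D2.
+   *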
+   * @param num
+   * @return
+   */
+  public static byte[] bigDecimalToByte(BigDecimal num) {
+    BigInteger sig = new BigInteger(num.unscaledValue().toString());
+    int scale = num.scale();
+    byte[] bscale = new byte[] { (byte) (scale) };
+    byte[] buff = sig.toByteArray();
+    byte[] completeArr = new byte[buff.length + bscale.length];
+    System.arraycopy(bscale, 0, completeArr, 0, bscale.length);
+    System.arraycopy(buff, 0, completeArr, bscale.length, buff.length);
+    return completeArr;
+  }
+
+  /**
+   * This method will convert a byte value back to big decimal value
+   *
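+   * Inverse of bigDecimalToByte, e.g. (hypothetical input) {0x02, 0x04, 0xD2}
+   * is read back as scale 2 and unscaled value 1234, i.e. 12.34.
+   *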
+   * @param raw
+   * @return
+   */
+  public static BigDecimal byteToBigDecimal(byte[] raw) {
+    int scale = (raw[0] & 0xFF);
+    byte[] unscale = new byte[raw.length - 1];
+    System.arraycopy(raw, 1, unscale, 0, unscale.length);
+    BigInteger sig = new BigInteger(unscale);
+    return new BigDecimal(sig, scale);
+  }
+
+  /**
+   * returns the SqlStatement.Type of corresponding string value
+   *
+   * @param dataTypeStr
+   * @return return the SqlStatement.Type
+   */
+  public static DataType getDataType(String dataTypeStr) {
+    DataType dataType = null;
+    switch (dataTypeStr) {
+      case "TIMESTAMP":
+        dataType = DataType.TIMESTAMP;
+        break;
+      case "STRING":
+        dataType = DataType.STRING;
+        break;
+      case "INT":
+        dataType = DataType.INT;
+        break;
+      case "SHORT":
+        dataType = DataType.SHORT;
+        break;
+      case "LONG":
+        dataType = DataType.LONG;
+        break;
+      case "DOUBLE":
+        dataType = DataType.DOUBLE;
+        break;
+      case "DECIMAL":
+        dataType = DataType.DECIMAL;
+        break;
+      case "ARRAY":
+        dataType = DataType.ARRAY;
+        break;
+      case "STRUCT":
+        dataType = DataType.STRUCT;
+        break;
+      case "MAP":
+      default:
+        dataType = DataType.STRING;
+    }
+    return dataType;
+  }
+
+  /**
+   * Below method will be used to check whether the input data is a valid string
+   * for the given data type. Returns false if the string cannot be parsed.
+   */
+  public static boolean isValidData(String data, DataType actualDataType) {
+    if (null == data) {
+      return false;
+    }
+    try {
+      switch (actualDataType) {
+        case SHORT:
+        case INT:
+        case LONG:
+        case DOUBLE:
+        case DECIMAL:
+          return NumberUtils.isNumber(data);
+        case TIMESTAMP:
+          if (data.isEmpty()) {
+            return false;
+          }
+          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
+              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
+          try {
+            parser.parse(data);
+            return true;
+          } catch (ParseException e) {
+            return false;
+          }
+        default:
+          return true;
+      }
+    } catch (NumberFormatException ex) {
+      return false;
+    }
+  }
+
+  /**
+   * Below method will be used to convert the data passed to its actual data
+   * type
+   *
+   * @param data           data
+   * @param actualDataType actual data type
+   * @return actual data after conversion
+   */
+  public static Object getDataBasedOnDataType(String data, DataType actualDataType) {
+
+    if (null == data || CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(data)) {
+      return null;
+    }
+    try {
+      switch (actualDataType) {
+        case INT:
+          if (data.isEmpty()) {
+            return null;
+          }
+          return Integer.parseInt(data);
+        case SHORT:
+          if (data.isEmpty()) {
+            return null;
+          }
+          return Short.parseShort(data);
+        case DOUBLE:
+          if (data.isEmpty()) {
+            return null;
+          }
+          return Double.parseDouble(data);
+        case LONG:
+          if (data.isEmpty()) {
+            return null;
+          }
+          return Long.parseLong(data);
+        case TIMESTAMP:
+          if (data.isEmpty()) {
+            return null;
+          }
+          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
+              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
+          Date dateToStr = null;
+          try {
+            dateToStr = parser.parse(data);
+            return dateToStr.getTime() * 1000;
+          } catch (ParseException e) {
+            LOGGER.error("Cannot convert" + data + " to Time/Long type value" + e.getMessage());
+            return null;
+          }
+        case DECIMAL:
+          if (data.isEmpty()) {
+            return null;
+          }
+          java.math.BigDecimal javaDecVal = new java.math.BigDecimal(data);
+          scala.math.BigDecimal scalaDecVal = new scala.math.BigDecimal(javaDecVal);
+          org.apache.spark.sql.types.Decimal decConverter =
+              new org.apache.spark.sql.types.Decimal();
+          return decConverter.set(scalaDecVal);
+        default:
+          return UTF8String.fromString(data);
+      }
+    } catch (NumberFormatException ex) {
+      LOGGER.error("Problem while converting data type" + data);
+      return null;
+    }
+
+  }
+
+  public static Object getMeasureDataBasedOnDataType(Object data, DataType dataType) {
+
+    if (null == data) {
+      return null;
+    }
+    try {
+      switch (dataType) {
+        case DOUBLE:
+          return data;
+        case LONG:
+          return data;
+        case DECIMAL:
+          java.math.BigDecimal javaDecVal = new java.math.BigDecimal(data.toString());
+          scala.math.BigDecimal scalaDecVal = new scala.math.BigDecimal(javaDecVal);
+          org.apache.spark.sql.types.Decimal decConverter =
+              new org.apache.spark.sql.types.Decimal();
+          return decConverter.set(scalaDecVal);
+        default:
+          return data;
+      }
+    } catch (NumberFormatException ex) {
+      LOGGER.error("Problem while converting data type" + data);
+      return null;
+    }
+
+  }
+
+  /**
+   * Below method will be used to check whether the data is parseable for its
+   * data type. If it is not parseable, null is returned so that the system
+   * can fall back to the default null member value.
+   *
+   * @param data           data
+   * @param actualDataType actual data type
+   * @return actual data after conversion
+   */
+  public static Object normalizeIntAndLongValues(String data, DataType actualDataType) {
+    if (null == data) {
+      return null;
+    }
+    try {
+      Object parsedValue = null;
+      switch (actualDataType) {
+        case INT:
+          parsedValue = Integer.parseInt(data);
+          break;
+        case LONG:
+          parsedValue = Long.parseLong(data);
+          break;
+        default:
+          return data;
+      }
+      if (null != parsedValue) {
+        return data;
+      }
+      return null;
+    } catch (NumberFormatException ex) {
+      return null;
+    }
+  }
+
+  /**
+   * This method will parse a given string value corresponding to its data type
+   *
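+   * Example (hypothetical input): for a DECIMAL(5,2) dimension the value
+   * "12.345" is rescaled HALF_UP to "12.35", while a value whose precision
+   * exceeds the configured precision yields null.
+   *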
+   * @param value     value to parse
+   * @param dimension dimension to get data type and precision and scale in case of decimal
+   *                  data type
+   * @return
+   */
+  public static String normalizeColumnValueForItsDataType(String value, CarbonDimension dimension) {
+    try {
+      Object parsedValue = null;
+      // validation will not be done for the timestamp data type, as a direct dictionary
+      // is generated for timestamp and no dictionary file is created for such columns
+      switch (dimension.getDataType()) {
+        case DECIMAL:
+          return parseStringToBigDecimal(value, dimension);
+        case INT:
+        case LONG:
+          parsedValue = normalizeIntAndLongValues(value, dimension.getDataType());
+          break;
+        case DOUBLE:
+          parsedValue = Double.parseDouble(value);
+          break;
+        default:
+          return value;
+      }
+      if (null != parsedValue) {
+        return value;
+      }
+      return null;
+    } catch (Exception e) {
+      return null;
+    }
+  }
+
+  /**
+   * This method will parse a value to its datatype if datatype is decimal else will return
+   * the value passed
+   *
+   * @param value     value to be parsed
+   * @param dimension
+   * @return
+   */
+  public static String parseValue(String value, CarbonDimension dimension) {
+    try {
+      switch (dimension.getDataType()) {
+        case DECIMAL:
+          return parseStringToBigDecimal(value, dimension);
+        default:
+          return value;
+      }
+    } catch (Exception e) {
+      return null;
+    }
+  }
+
+  private static String parseStringToBigDecimal(String value, CarbonDimension dimension) {
+    BigDecimal bigDecimal = new BigDecimal(value)
+        .setScale(dimension.getColumnSchema().getScale(), RoundingMode.HALF_UP);
+    BigDecimal normalizedValue =
+        normalizeDecimalValue(bigDecimal, dimension.getColumnSchema().getPrecision());
+    if (null != normalizedValue) {
+      return normalizedValue.toString();
+    }
+    return null;
+  }
+}



[49/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/carbondata/common/logging/ft/LoggingServiceTest_FT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/carbondata/common/logging/ft/LoggingServiceTest_FT.java b/common/src/test/java/org/carbondata/common/logging/ft/LoggingServiceTest_FT.java
deleted file mode 100644
index 66d1e3f..0000000
--- a/common/src/test/java/org/carbondata/common/logging/ft/LoggingServiceTest_FT.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging.ft;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStreamReader;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-
-import junit.framework.TestCase;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.MDC;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class LoggingServiceTest_FT extends TestCase {
-
-  private static LogService logger =
-      LogServiceFactory.getLogService(LoggingServiceTest_FT.class.getName());
-
-  @Before public void setUp() throws Exception {
-    MDC.put("MODULE", "Function Test");
-    MDC.put("USER_NAME", "testuser");
-    MDC.put("CLIENT_IP", "127.0.0.1");
-    MDC.put("OPERATRION", "log");
-  }
-
-  @Test public void testIsAuditFileCreated() {
-    File f = new File("./unibiaudit.log");
-    Assert.assertFalse(f.exists());
-  }
-
-  @Test public void testAudit() {
-
-    String expectedAuditLine =
-        "[main] AUDIT [com.huawei.iweb.platform.logging.ft.LoggingServiceTest_FT] 127.0.0.1 "
-            + "testuser Function Test log- audit message created";
-    logger.audit("audit message created");
-
-    LogManager.shutdown();
-
-    try {
-      FileInputStream fstream = new FileInputStream("./unibiaudit.log");
-      BufferedReader br = new BufferedReader(new InputStreamReader(fstream));
-      String actualAuditLine = null;
-      String strLine = null;
-      while ((strLine = br.readLine()) != null) {
-        actualAuditLine = strLine;
-      }
-
-      System.out.println(actualAuditLine);
-
-      if (actualAuditLine != null) {
-        int index = actualAuditLine.indexOf("[main]");
-        actualAuditLine = actualAuditLine.substring(index);
-        Assert.assertEquals(expectedAuditLine, actualAuditLine);
-      } else {
-        Assert.assertTrue(false);
-      }
-    } catch (FileNotFoundException e) {
-      e.printStackTrace();
-      Assert.assertTrue(!false);
-    } catch (IOException e) {
-      e.printStackTrace();
-      Assert.assertTrue(false);
-    }
-
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/carbondata/common/logging/impl/AuditExtendedRollingFileAppenderTest_UT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/carbondata/common/logging/impl/AuditExtendedRollingFileAppenderTest_UT.java b/common/src/test/java/org/carbondata/common/logging/impl/AuditExtendedRollingFileAppenderTest_UT.java
deleted file mode 100644
index 8aa82d8..0000000
--- a/common/src/test/java/org/carbondata/common/logging/impl/AuditExtendedRollingFileAppenderTest_UT.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging.impl;
-
-import org.carbondata.common.logging.impl.AuditExtendedRollingFileAppender;
-import org.carbondata.common.logging.impl.AuditLevel;
-
-import junit.framework.Assert;
-import mockit.Deencapsulation;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LoggingEvent;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class AuditExtendedRollingFileAppenderTest_UT {
-
-  private AuditExtendedRollingFileAppender rAppender = null;
-
-  @Before public void setUp() throws Exception {
-    rAppender = new AuditExtendedRollingFileAppender();
-    Deencapsulation.setField(rAppender, "fileName", "audit.log");
-    Deencapsulation.setField(rAppender, "maxBackupIndex", 1);
-    Deencapsulation.setField(rAppender, "maxFileSize", 1000L);
-
-  }
-
-  @After public void tearDown() throws Exception {
-
-  }
-
-  @Test public void testRollOver() {
-    rAppender.rollOver();
-    rAppender.rollOver();
-    rAppender.rollOver();
-    Assert.assertTrue(true);
-  }
-
-  @Test public void testCleanLogs() {
-    final String startName = "audit";
-    final String folderPath = "./";
-    int maxBackupIndex = 1;
-
-    Deencapsulation.invoke(rAppender, "cleanLogs", startName, folderPath, maxBackupIndex);
-    Assert.assertTrue(true);
-  }
-
-  @Test public void testSubAppendLoggingEvent() {
-    Logger logger = Logger.getLogger(this.getClass());
-    LoggingEvent event = new LoggingEvent(null, logger, 0L, AuditLevel.AUDIT, null, null);
-
-    Deencapsulation.setField(rAppender, "qw", null);
-    try {
-      rAppender.subAppend(event);
-    } catch (Exception e) {
-      //
-    }
-    Assert.assertTrue(true);
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/carbondata/common/logging/impl/AuditLevelTest_UT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/carbondata/common/logging/impl/AuditLevelTest_UT.java b/common/src/test/java/org/carbondata/common/logging/impl/AuditLevelTest_UT.java
deleted file mode 100644
index 4e29d2f..0000000
--- a/common/src/test/java/org/carbondata/common/logging/impl/AuditLevelTest_UT.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging.impl;
-
-import org.carbondata.common.logging.impl.AuditLevel;
-
-import junit.framework.TestCase;
-import org.apache.log4j.Level;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class AuditLevelTest_UT extends TestCase {
-
-  @Before public void setUp() throws Exception {
-  }
-
-  @After public void tearDown() throws Exception {
-  }
-
-  @Test public void testAuditLevel() {
-    assertEquals(AuditLevel.AUDIT.toInt(), 55000);
-  }
-
-  @Test public void testToLevelIntLevel() {
-    assertSame(AuditLevel.AUDIT, AuditLevel.toLevel(55000, Level.DEBUG));
-  }
-
-  @Test public void testToLevelStringLevel() {
-    assertSame(AuditLevel.AUDIT, AuditLevel.toLevel("AUDIT", Level.DEBUG));
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/carbondata/common/logging/impl/ExtendedRollingFileAppenderTest_UT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/carbondata/common/logging/impl/ExtendedRollingFileAppenderTest_UT.java b/common/src/test/java/org/carbondata/common/logging/impl/ExtendedRollingFileAppenderTest_UT.java
deleted file mode 100644
index 14ccd20..0000000
--- a/common/src/test/java/org/carbondata/common/logging/impl/ExtendedRollingFileAppenderTest_UT.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging.impl;
-
-import org.carbondata.common.logging.impl.AuditLevel;
-import org.carbondata.common.logging.impl.ExtendedRollingFileAppender;
-
-import junit.framework.Assert;
-import mockit.Deencapsulation;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LoggingEvent;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class ExtendedRollingFileAppenderTest_UT {
-
-  private ExtendedRollingFileAppender rAppender = null;
-
-  @Before public void setUp() throws Exception {
-    rAppender = new ExtendedRollingFileAppender();
-    Deencapsulation.setField(rAppender, "fileName", "dummy.log");
-    Deencapsulation.setField(rAppender, "maxBackupIndex", 1);
-    Deencapsulation.setField(rAppender, "maxFileSize", 1000L);
-  }
-
-  @After public void tearDown() throws Exception {
-  }
-
-  @Test public void testRollOver() {
-    rAppender.rollOver();
-    rAppender.rollOver();
-    rAppender.rollOver();
-    Assert.assertTrue(true);
-  }
-
-  @Test public void testCleanLogs() {
-    final String startName = "dummy";
-    final String folderPath = "./";
-    int maxBackupIndex = 1;
-
-    Deencapsulation.invoke(rAppender, "cleanLogs", startName, folderPath, maxBackupIndex);
-  }
-
-  @Test public void testSubAppendLoggingEvent() {
-    Logger logger = Logger.getLogger(this.getClass());
-    LoggingEvent event = new LoggingEvent(null, logger, 0L, AuditLevel.DEBUG, null, null);
-
-    try {
-      rAppender.subAppend(event);
-    } catch (Exception e) {
-      //
-    }
-    Assert.assertTrue(true);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/carbondata/common/logging/impl/FileUtilTest_UT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/carbondata/common/logging/impl/FileUtilTest_UT.java b/common/src/test/java/org/carbondata/common/logging/impl/FileUtilTest_UT.java
deleted file mode 100644
index 900cd0e..0000000
--- a/common/src/test/java/org/carbondata/common/logging/impl/FileUtilTest_UT.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging.impl;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-
-import junit.framework.TestCase;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class FileUtilTest_UT extends TestCase {
-
-  /**
-   * @throws Exception
-   */
-  @Before public void setUp() throws Exception {
-    File f = new File("myfile.txt");
-    if (!f.exists()) {
-      f.createNewFile();
-    }
-  }
-
-  /**
-   * @throws Exception
-   */
-  @After public void tearDown() throws Exception {
-    File f = new File("myfile.txt");
-    if (f.exists()) {
-      f.delete();
-    }
-  }
-
-  @Test public void testClose() {
-    try {
-      FileInputStream in = new FileInputStream(new File("myfile.txt"));
-      FileUtil.close(in);
-      assertTrue(true);
-    } catch (FileNotFoundException e) {
-      assertTrue(false);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/common/src/test/java/org/carbondata/common/logging/impl/StandardLogServiceTest_UT.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/carbondata/common/logging/impl/StandardLogServiceTest_UT.java b/common/src/test/java/org/carbondata/common/logging/impl/StandardLogServiceTest_UT.java
deleted file mode 100644
index 4c9f0a3..0000000
--- a/common/src/test/java/org/carbondata/common/logging/impl/StandardLogServiceTest_UT.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.common.logging.impl;
-
-import org.carbondata.common.logging.impl.StandardLogService;
-
-import junit.framework.TestCase;
-import mockit.Mock;
-import mockit.MockUp;
-import org.apache.log4j.Category;
-import org.apache.log4j.Priority;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class StandardLogServiceTest_UT extends TestCase {
-
-  private StandardLogService logService = null;
-
-  /**
-   * @throws Exception
-   */
-  @Before public void setUp() throws Exception {
-
-    new MockUp<Category>() {
-      @SuppressWarnings("unused")
-      @Mock public boolean isDebugEnabled() {
-        return true;
-      }
-
-      @SuppressWarnings("unused")
-      @Mock public boolean isEnabledFor(Priority level) {
-        return true;
-      }
-
-      @SuppressWarnings("unused")
-      @Mock public boolean isInfoEnabled() {
-        return true;
-      }
-    };
-
-    logService = new StandardLogService(this.getClass().getName());
-  }
-
-  /**
-   * @throws Exception
-   * @Author k00742797
-   * @Description : tearDown
-   */
-  @After public void tearDown() throws Exception {
-  }
-
-  @Test public void testStandardLogService() {
-    if (logService != null && logService instanceof StandardLogService) {
-      Assert.assertTrue(true);
-    } else {
-      Assert.assertTrue(false);
-    }
-  }
-
-  @Test public void testIsDebugEnabled() {
-    Assert.assertEquals(true, logService.isDebugEnabled());
-  }
-
-  @Test public void testIsWarnEnabled() {
-    Assert.assertEquals(true, logService.isWarnEnabled());
-  }
-
-  @Test public void testSecureLogEventObjectArray() {
-    Assert.assertTrue(true);
-  }
-
-  @Test public void testAuditLogEventObjectArray() {
-    logService.audit("testing");
-    Assert.assertTrue(true);
-  }
-
-  @Test public void testDebugLogEventObjectArray() {
-    logService.debug("testing");
-    Assert.assertTrue(true);
-  }
-
-  @Test public void testErrorLogEventObjectArray() {
-    logService.error("testing");
-    Assert.assertTrue(true);
-  }
-
-  @Test public void testErrorLogEventThrowableObjectArray() {
-    Exception exception = new Exception("test");
-    logService.error(exception);
-    Assert.assertTrue(true);
-  }
-
-  @Test public void testErrorLogEventThrowableMessage() {
-    Exception exception = new Exception("test");
-    logService.error(exception, "additional message");
-    Assert.assertTrue(true);
-  }
-
-  @Test public void testInfoLogEventObjectArray() {
-    logService.info("testing");
-    Assert.assertTrue(true);
-  }
-
-  @Test public void testIsInfoEnabled() {
-    Assert.assertEquals(true, logService.isInfoEnabled());
-  }
-
-  @Test public void testDeleteLogs() {
-    Assert.assertTrue(true);
-  }
-
-  @Test public void testFlushLogs() {
-    Assert.assertTrue(true);
-  }
-
-  @Test public void testSetEventProperties() {
-    logService.setEventProperties("CLIENT_IP", "127.0.0.1");
-    Assert.assertTrue(true);
-  }
-
-  @Test public void testIsDoLog() {
-    StandardLogService.setDoLog(true);
-    Assert.assertEquals(true, StandardLogService.isDoLog());
-
-    StandardLogService.setDoLog(false);
-    Assert.assertEquals(false, StandardLogService.isDoLog());
-
-  }
-
-  @Test public void testSetDoLog() {
-    StandardLogService.setDoLog(true);
-    Assert.assertEquals(true, StandardLogService.isDoLog());
-  }
-
-  @Test public void testAuditString() {
-    logService.audit("audit message");
-    Assert.assertTrue(true);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/common/ext/ColumnUniqueIdGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/common/ext/ColumnUniqueIdGenerator.java b/core/src/main/java/org/apache/carbondata/common/ext/ColumnUniqueIdGenerator.java
new file mode 100644
index 0000000..577a5ed
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/common/ext/ColumnUniqueIdGenerator.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.common.ext;
+
+import java.util.UUID;
+
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
+import org.apache.carbondata.core.service.ColumnUniqueIdService;
+
+/**
+ * It returns a unique id for the given column
+ */
+public class ColumnUniqueIdGenerator implements ColumnUniqueIdService {
+
+  private static ColumnUniqueIdService columnUniqueIdService = new ColumnUniqueIdGenerator();
+
+  @Override public String generateUniqueId(String databaseName, ColumnSchema columnSchema) {
+    return UUID.randomUUID().toString();
+  }
+
+  public static ColumnUniqueIdService getInstance() {
+    return columnUniqueIdService;
+  }
+
+}

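A minimal usage sketch of the service above (hypothetical class name ColumnUniqueIdExample; assumes the classes added by this patch are on the classpath). As the implementation shows, both arguments are ignored and a fresh random UUID string is returned on every call:

    import org.apache.carbondata.common.ext.ColumnUniqueIdGenerator;
    import org.apache.carbondata.core.service.ColumnUniqueIdService;

    public class ColumnUniqueIdExample {
      public static void main(String[] args) {
        ColumnUniqueIdService service = ColumnUniqueIdGenerator.getInstance();
        // the implementation ignores its arguments, so null is tolerated here;
        // real callers should pass the actual ColumnSchema
        String id = service.generateUniqueId("default", null);
        System.out.println("generated column id: " + id);
      }
    }
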
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/common/ext/DictionaryFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/common/ext/DictionaryFactory.java b/core/src/main/java/org/apache/carbondata/common/ext/DictionaryFactory.java
new file mode 100644
index 0000000..3cf1ad5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/common/ext/DictionaryFactory.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.common.ext;
+
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.carbon.ColumnIdentifier;
+import org.apache.carbondata.core.reader.CarbonDictionaryMetadataReader;
+import org.apache.carbondata.core.reader.CarbonDictionaryMetadataReaderImpl;
+import org.apache.carbondata.core.reader.CarbonDictionaryReader;
+import org.apache.carbondata.core.reader.CarbonDictionaryReaderImpl;
+import org.apache.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReader;
+import org.apache.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReaderImpl;
+import org.apache.carbondata.core.service.DictionaryService;
+import org.apache.carbondata.core.writer.CarbonDictionaryWriter;
+import org.apache.carbondata.core.writer.CarbonDictionaryWriterImpl;
+import org.apache.carbondata.core.writer.sortindex.CarbonDictionarySortIndexWriter;
+import org.apache.carbondata.core.writer.sortindex.CarbonDictionarySortIndexWriterImpl;
+
+/**
+ * service to get dictionary reader and writer
+ */
+public class DictionaryFactory implements DictionaryService {
+
+  private static DictionaryService dictService = new DictionaryFactory();
+
+  /**
+   * get dictionary writer
+   *
+   * @param carbonTableIdentifier
+   * @param columnIdentifier
+   * @param carbonStorePath
+   * @return
+   */
+  @Override public CarbonDictionaryWriter getDictionaryWriter(
+      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
+      String carbonStorePath) {
+    return new CarbonDictionaryWriterImpl(carbonStorePath, carbonTableIdentifier, columnIdentifier);
+  }
+
+  /**
+   * get dictionary sort index writer
+   *
+   * @param carbonTableIdentifier
+   * @param columnIdentifier
+   * @param carbonStorePath
+   * @return
+   */
+  @Override public CarbonDictionarySortIndexWriter getDictionarySortIndexWriter(
+      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
+      String carbonStorePath) {
+    return new CarbonDictionarySortIndexWriterImpl(carbonTableIdentifier, columnIdentifier,
+        carbonStorePath);
+  }
+
+  /**
+   * get dictionary metadata reader
+   *
+   * @param carbonTableIdentifier
+   * @param columnIdentifier
+   * @param carbonStorePath
+   * @return
+   */
+  @Override public CarbonDictionaryMetadataReader getDictionaryMetadataReader(
+      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
+      String carbonStorePath) {
+    return new CarbonDictionaryMetadataReaderImpl(carbonStorePath, carbonTableIdentifier,
+        columnIdentifier);
+  }
+
+  /**
+   * get dictionary reader
+   *
+   * @param carbonTableIdentifier
+   * @param columnIdentifier
+   * @param carbonStorePath
+   * @return
+   */
+  @Override public CarbonDictionaryReader getDictionaryReader(
+      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
+      String carbonStorePath) {
+    return new CarbonDictionaryReaderImpl(carbonStorePath, carbonTableIdentifier, columnIdentifier);
+  }
+
+  /**
+   * get dictionary sort index reader
+   *
+   * @param carbonTableIdentifier
+   * @param columnIdentifier
+   * @param carbonStorePath
+   * @return
+   */
+  @Override public CarbonDictionarySortIndexReader getDictionarySortIndexReader(
+      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
+      String carbonStorePath) {
+    return new CarbonDictionarySortIndexReaderImpl(carbonTableIdentifier, columnIdentifier,
+        carbonStorePath);
+  }
+
+  public static DictionaryService getInstance() {
+    return dictService;
+  }
+
+}

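A hedged sketch of how a caller might wire readers and writers through the new service facade (hypothetical class name DictionaryServiceSketch). The identifiers are taken as method parameters because their constructors are not part of this patch; only the factory wiring shown above is exercised:

    import org.apache.carbondata.common.ext.DictionaryFactory;
    import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
    import org.apache.carbondata.core.carbon.ColumnIdentifier;
    import org.apache.carbondata.core.reader.CarbonDictionaryReader;
    import org.apache.carbondata.core.service.DictionaryService;
    import org.apache.carbondata.core.writer.CarbonDictionaryWriter;

    public class DictionaryServiceSketch {
      static CarbonDictionaryWriter openWriter(CarbonTableIdentifier table,
          ColumnIdentifier column, String storePath) {
        DictionaryService service = DictionaryFactory.getInstance();
        return service.getDictionaryWriter(table, column, storePath);
      }

      static CarbonDictionaryReader openReader(CarbonTableIdentifier table,
          ColumnIdentifier column, String storePath) {
        return DictionaryFactory.getInstance().getDictionaryReader(table, column, storePath);
      }
    }
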
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/common/ext/PathFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/common/ext/PathFactory.java b/core/src/main/java/org/apache/carbondata/common/ext/PathFactory.java
new file mode 100644
index 0000000..e5ff83a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/common/ext/PathFactory.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.common.ext;
+
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.carbon.ColumnIdentifier;
+import org.apache.carbondata.core.carbon.path.CarbonStorePath;
+import org.apache.carbondata.core.carbon.path.CarbonTablePath;
+import org.apache.carbondata.core.service.PathService;
+
+/**
+ * Create helper to get path details
+ */
+public class PathFactory implements PathService {
+
+  private static PathService pathService = new PathFactory();
+
+  /**
+   * @param columnIdentifier
+   * @param storeLocation
+   * @param tableIdentifier
+   * @return store path related to tables
+   */
+  @Override public CarbonTablePath getCarbonTablePath(ColumnIdentifier columnIdentifier,
+      String storeLocation, CarbonTableIdentifier tableIdentifier) {
+    return CarbonStorePath.getCarbonTablePath(storeLocation, tableIdentifier);
+  }
+
+  public static PathService getInstance() {
+    return pathService;
+  }
+}

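A small sketch of the path service (hypothetical class name PathServiceSketch). Note that the implementation above delegates straight to CarbonStorePath and never touches the columnIdentifier parameter:

    import org.apache.carbondata.common.ext.PathFactory;
    import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
    import org.apache.carbondata.core.carbon.path.CarbonTablePath;
    import org.apache.carbondata.core.service.PathService;

    public class PathServiceSketch {
      static CarbonTablePath resolve(CarbonTableIdentifier table, String storeLocation) {
        PathService service = PathFactory.getInstance();
        // columnIdentifier is unused by the implementation above, so null suffices here
        return service.getCarbonTablePath(null, storeLocation, table);
      }
    }
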
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/common/factory/CarbonCommonFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/common/factory/CarbonCommonFactory.java b/core/src/main/java/org/apache/carbondata/common/factory/CarbonCommonFactory.java
new file mode 100644
index 0000000..09b4465
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/common/factory/CarbonCommonFactory.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.common.factory;
+
+import org.apache.carbondata.common.ext.ColumnUniqueIdGenerator;
+import org.apache.carbondata.common.ext.DictionaryFactory;
+import org.apache.carbondata.common.ext.PathFactory;
+import org.apache.carbondata.core.service.ColumnUniqueIdService;
+import org.apache.carbondata.core.service.DictionaryService;
+import org.apache.carbondata.core.service.PathService;
+
+/**
+ * Interface to get services
+ */
+public class CarbonCommonFactory {
+
+  /**
+   * @return dictionary service
+   */
+  public static DictionaryService getDictionaryService() {
+    return DictionaryFactory.getInstance();
+  }
+
+  /**
+   * @return path service
+   */
+  public static PathService getPathService() {
+    return PathFactory.getInstance();
+  }
+
+  /**
+   * @return unique id generator
+   */
+  public static ColumnUniqueIdService getColumnUniqueIdGenerator() {
+    return ColumnUniqueIdGenerator.getInstance();
+  }
+
+}

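A usage sketch of the facade (hypothetical class name FactorySketch; assumes the service classes from this patch are on the classpath). Each getter hands back a process-wide singleton, so callers can fetch them as often as convenient:

    import org.apache.carbondata.common.factory.CarbonCommonFactory;
    import org.apache.carbondata.core.service.ColumnUniqueIdService;
    import org.apache.carbondata.core.service.DictionaryService;
    import org.apache.carbondata.core.service.PathService;

    public class FactorySketch {
      public static void main(String[] args) {
        DictionaryService dictionaries = CarbonCommonFactory.getDictionaryService();
        PathService paths = CarbonCommonFactory.getPathService();
        ColumnUniqueIdService ids = CarbonCommonFactory.getColumnUniqueIdGenerator();
        // repeated calls return the same instances
        System.out.println(dictionaries + ", " + paths + ", " + ids);
      }
    }
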
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/Cache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/Cache.java b/core/src/main/java/org/apache/carbondata/core/cache/Cache.java
new file mode 100644
index 0000000..b519deb
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/Cache.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache;
+
+import java.util.List;
+
+import org.apache.carbondata.core.util.CarbonUtilException;
+
+/**
+ * A semi-persistent mapping from keys to values. Cache entries are manually added using
+ * #get(Key) or #getAll(List<Keys>), and are stored in the cache until
+ * either evicted or manually invalidated.
+ * Implementations of this interface are expected to be thread-safe, and can be safely accessed
+ * by multiple concurrent threads.
+ */
+public interface Cache<K, V> {
+
+  /**
+   * This method will get the value for the given key. If value does not exist
+   * for the given key, it will check and load the value.
+   *
+   * @param key
+   * @return
+   * @throws CarbonUtilException in case memory is not sufficient to load data into memory
+   */
+  V get(K key) throws CarbonUtilException;
+
+  /**
+   * This method will return a list of values for the given list of keys.
+   * For each key, this method will check and load the data if required.
+   *
+   * @param keys
+   * @return
+   * @throws CarbonUtilException in case memory is not sufficient to load data into memory
+   */
+  List<V> getAll(List<K> keys) throws CarbonUtilException;
+
+  /**
+   * This method will return the value for the given key. It will not check and load
+   * the data for the given key
+   *
+   * @param key
+   * @return
+   */
+  V getIfPresent(K key);
+
+  /**
+   * This method will remove the cache for a given key
+   *
+   * @param key
+   */
+  void invalidate(K key);
+}
+

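To make the contract concrete, here is a toy map-backed implementation (hypothetical class name MapBackedCache, not a CarbonData class): get() is the loading entry point, getIfPresent() never loads, and invalidate() evicts.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.carbondata.core.cache.Cache;
    import org.apache.carbondata.core.util.CarbonUtilException;

    public class MapBackedCache<K, V> implements Cache<K, V> {
      private final Map<K, V> store = new HashMap<K, V>();

      @Override public V get(K key) throws CarbonUtilException {
        // a real implementation would check and load from disk on a miss
        return store.get(key);
      }

      @Override public List<V> getAll(List<K> keys) throws CarbonUtilException {
        List<V> values = new ArrayList<V>(keys.size());
        for (K key : keys) {
          values.add(get(key));
        }
        return values;
      }

      @Override public V getIfPresent(K key) {
        return store.get(key);
      }

      @Override public void invalidate(K key) {
        store.remove(key);
      }
    }
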
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/CacheProvider.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/CacheProvider.java b/core/src/main/java/org/apache/carbondata/core/cache/CacheProvider.java
new file mode 100644
index 0000000..fa505bf
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/CacheProvider.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
+import org.apache.carbondata.core.cache.dictionary.ForwardDictionaryCache;
+import org.apache.carbondata.core.cache.dictionary.ReverseDictionaryCache;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+/**
+ * Cache provider class which will create a cache based on given type
+ */
+public class CacheProvider {
+
+  /**
+   * cache provider instance
+   */
+  private static CacheProvider cacheProvider = new CacheProvider();
+
+  /**
+   * a map that will hold the entry for cache type to cache object mapping
+   */
+  private Map<CacheType, Cache> cacheTypeToCacheMap =
+      new HashMap<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+
+  /**
+   * a map that will hold the mapping of cache type to LRU cache instance
+   */
+  private Map<CacheType, CarbonLRUCache> cacheTypeToLRUCacheMap =
+      new HashMap<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+
+  /**
+   * object lock instance to be used in synchronization block
+   */
+  private final Object lock = new Object();
+
+  /**
+   * private constructor to follow singleton design pattern for this class
+   */
+  private CacheProvider() {
+
+  }
+
+  /**
+   * @return cache provider instance
+   */
+  public static CacheProvider getInstance() {
+    return cacheProvider;
+  }
+
+  /**
+   * This method will check if a cache already exists for given cache type and create in case
+   * it is not present in the map
+   *
+   * @param cacheType       type of cache
+   * @param carbonStorePath store path
+   * @param <K>
+   * @param <V>
+   * @return
+   */
+  public <K, V> Cache<K, V> createCache(CacheType cacheType, String carbonStorePath) {
+    //check if lru cache is null, if null create one
+    //check if cache is null for given cache type, if null create one
+    if (!dictionaryCacheAlreadyExists(cacheType)) {
+      synchronized (lock) {
+        if (!dictionaryCacheAlreadyExists(cacheType)) {
+          if (null == cacheTypeToLRUCacheMap.get(cacheType)) {
+            createLRULevelCacheInstance(cacheType);
+          }
+          createDictionaryCacheForGivenType(cacheType, carbonStorePath);
+        }
+      }
+    }
+    return cacheTypeToCacheMap.get(cacheType);
+  }
+
+  /**
+   * This method will create the cache for given cache type
+   *
+   * @param cacheType       type of cache
+   * @param carbonStorePath store path
+   */
+  private void createDictionaryCacheForGivenType(CacheType cacheType, String carbonStorePath) {
+    Cache cacheObject = null;
+    if (cacheType.equals(CacheType.REVERSE_DICTIONARY)) {
+      cacheObject =
+          new ReverseDictionaryCache<DictionaryColumnUniqueIdentifier, Dictionary>(carbonStorePath,
+              cacheTypeToLRUCacheMap.get(cacheType));
+    } else if (cacheType.equals(CacheType.FORWARD_DICTIONARY)) {
+      cacheObject =
+          new ForwardDictionaryCache<DictionaryColumnUniqueIdentifier, Dictionary>(carbonStorePath,
+              cacheTypeToLRUCacheMap.get(cacheType));
+    }
+    cacheTypeToCacheMap.put(cacheType, cacheObject);
+  }
+
+  /**
+   * This method will create the lru cache instance based on the given type
+   *
+   * @param cacheType
+   */
+  private void createLRULevelCacheInstance(CacheType cacheType) {
+    CarbonLRUCache carbonLRUCache = null;
+    // if cache type is dictionary cache, then same lru cache instance has to be shared
+    // between forward and reverse cache
+    if (cacheType.equals(CacheType.REVERSE_DICTIONARY) || cacheType
+        .equals(CacheType.FORWARD_DICTIONARY)) {
+      carbonLRUCache = new CarbonLRUCache(CarbonCommonConstants.CARBON_MAX_LEVEL_CACHE_SIZE,
+          CarbonCommonConstants.CARBON_MAX_LEVEL_CACHE_SIZE_DEFAULT);
+      cacheTypeToLRUCacheMap.put(CacheType.REVERSE_DICTIONARY, carbonLRUCache);
+      cacheTypeToLRUCacheMap.put(CacheType.FORWARD_DICTIONARY, carbonLRUCache);
+    }
+  }
+
+  /**
+   * This method will check whether the map already has an entry for
+   * given cache type
+   *
+   * @param cacheType
+   * @return
+   */
+  private boolean dictionaryCacheAlreadyExists(CacheType cacheType) {
+    return null != cacheTypeToCacheMap.get(cacheType);
+  }
+
+  /**
+   * Below method will be used to clear the cache
+   */
+  public void dropAllCache() {
+    cacheTypeToLRUCacheMap.clear();
+    cacheTypeToCacheMap.clear();
+  }
+}

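The provider uses double-checked locking around the type-to-cache map, so concurrent callers asking for the same cache type observe one shared instance. A sketch (hypothetical class name CacheProviderSketch; the store path is illustrative):

    import org.apache.carbondata.core.cache.Cache;
    import org.apache.carbondata.core.cache.CacheProvider;
    import org.apache.carbondata.core.cache.CacheType;
    import org.apache.carbondata.core.cache.dictionary.Dictionary;
    import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;

    public class CacheProviderSketch {
      public static void main(String[] args) {
        CacheProvider provider = CacheProvider.getInstance();
        Cache<DictionaryColumnUniqueIdentifier, Dictionary> forward =
            provider.createCache(CacheType.FORWARD_DICTIONARY, "/tmp/carbonstore");
        // a second request for the same type returns the already-created cache
        // (run with -ea to enable the assertion)
        assert forward == provider.createCache(CacheType.FORWARD_DICTIONARY, "/tmp/carbonstore");
      }
    }

Note also that the forward and reverse dictionary caches deliberately share one CarbonLRUCache instance, so both draw from the same memory budget.
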
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/CacheType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/CacheType.java b/core/src/main/java/org/apache/carbondata/core/cache/CacheType.java
new file mode 100644
index 0000000..ea511e9
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/CacheType.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache;
+
+import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
+
+/**
+ * class which defines different cache types. cache type can be dictionary cache for
+ * forward (surrogate key to byte array mapping) and reverse (byte array to
+ * surrogate mapping) dictionary or a B-tree cache
+ */
+public class CacheType<K, V> {
+
+  /**
+   * Forward dictionary cache which maintains surrogate key to byte array mapping
+   */
+  public static final CacheType<DictionaryColumnUniqueIdentifier, Dictionary> FORWARD_DICTIONARY =
+      new CacheType("forward_dictionary");
+
+  /**
+   * Reverse dictionary cache which maintains byte array to surrogate key mapping
+   */
+  public static final CacheType<DictionaryColumnUniqueIdentifier, Dictionary> REVERSE_DICTIONARY =
+      new CacheType("reverse_dictionary");
+
+  /**
+   * cacheName which is unique name for a cache
+   */
+  private String cacheName;
+
+  /**
+   * @param cacheName
+   */
+  private CacheType(String cacheName) {
+    this.cacheName = cacheName;
+  }
+
+  /**
+   * @return cache unique name
+   */
+  public String getCacheName() {
+    return cacheName;
+  }
+}

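CacheType follows the classic typesafe-enum pattern: the constructor is private and only the two fixed instances exist, so reference comparison and equals() behave identically, which is what CacheProvider relies on. A minimal illustration (hypothetical class name CacheTypeSketch):

    import org.apache.carbondata.core.cache.CacheType;

    public class CacheTypeSketch {
      static boolean isDictionaryCache(CacheType<?, ?> cacheType) {
        // == and equals() are interchangeable for these singleton instances
        return cacheType == CacheType.FORWARD_DICTIONARY
            || cacheType.equals(CacheType.REVERSE_DICTIONARY);
      }
    }
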
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/Cacheable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/Cacheable.java b/core/src/main/java/org/apache/carbondata/core/cache/Cacheable.java
new file mode 100644
index 0000000..1259fe3
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/Cacheable.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache;
+
+/**
+ * interface which declares methods which will decide whether to keep
+ * cacheable objects in memory
+ */
+public interface Cacheable {
+
+  /**
+   * This method will return the timestamp of the file, based on which the
+   * decision will be taken whether to read that file or not
+   *
+   * @return
+   */
+  long getFileTimeStamp();
+
+  /**
+   * This method will return the access count for a column, based on which a decision will be taken
+   * whether to keep the object in memory
+   *
+   * @return
+   */
+  int getAccessCount();
+
+  /**
+   * This method will return the memory size of a column
+   *
+   * @return
+   */
+  long getMemorySize();
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/CarbonLRUCache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/CarbonLRUCache.java b/core/src/main/java/org/apache/carbondata/core/cache/CarbonLRUCache.java
new file mode 100644
index 0000000..4ba38e4
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/CarbonLRUCache.java
@@ -0,0 +1,251 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache;
+
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.util.CarbonProperties;
+
+/**
+ * class which manages the lru cache
+ */
+public final class CarbonLRUCache {
+  /**
+   * constant for converting MB into bytes
+   */
+  private static final int BYTE_CONVERSION_CONSTANT = 1024 * 1024;
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(CarbonLRUCache.class.getName());
+  /**
+   * Map that will contain key as table unique name and value as cache Holder
+   * object
+   */
+  private Map<String, Cacheable> lruCacheMap;
+  /**
+   * lruCacheSize
+   */
+  private long lruCacheMemorySize;
+  /**
+   * total size of the cache
+   */
+  private long currentSize;
+
+  /**
+   * @param propertyName        property name to take the size configured
+   * @param defaultPropertyName default property in case size is not configured
+   */
+  public CarbonLRUCache(String propertyName, String defaultPropertyName) {
+    try {
+      lruCacheMemorySize = Integer
+          .parseInt(CarbonProperties.getInstance().getProperty(propertyName, defaultPropertyName));
+    } catch (NumberFormatException e) {
+      lruCacheMemorySize = Integer.parseInt(defaultPropertyName);
+    }
+    initCache();
+    if (lruCacheMemorySize > 0) {
+      LOGGER.info("Configured level cahce size is " + lruCacheMemorySize + " MB");
+      // convert in bytes
+      lruCacheMemorySize = lruCacheMemorySize * BYTE_CONVERSION_CONSTANT;
+    } else {
+      LOGGER.info("Column cache size not configured. Therefore default behavior will be "
+              + "considered and no LRU based eviction of columns will be done");
+    }
+  }
+
+  /**
+   * initialize lru cache
+   */
+  private void initCache() {
+    lruCacheMap =
+        new LinkedHashMap<String, Cacheable>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE, 1.0f,
+            true);
+  }
+
+  /**
+   * This method will give the list of all the keys that can be deleted from
+   * the level LRU cache
+   */
+  private List<String> getKeysToBeRemoved(long size) {
+    List<String> toBeDeletedKeys =
+        new ArrayList<String>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    long removedSize = 0;
+    for (Entry<String, Cacheable> entry : lruCacheMap.entrySet()) {
+      String key = entry.getKey();
+      Cacheable cacheInfo = entry.getValue();
+      long memorySize = cacheInfo.getMemorySize();
+      if (canBeRemoved(cacheInfo)) {
+        removedSize = removedSize + memorySize;
+        toBeDeletedKeys.add(key);
+        // check if after removing the current file size, required
+        // size when added to current size is sufficient to load a
+        // level or not
+        if (lruCacheMemorySize >= (currentSize - memorySize + size)) {
+          toBeDeletedKeys.clear();
+          toBeDeletedKeys.add(key);
+          removedSize = memorySize;
+          break;
+        }
+        // check if after removing the added size/removed size,
+        // required size when added to current size is sufficient to
+        // load a level or not
+        else if (lruCacheMemorySize >= (currentSize - removedSize + size)) {
+          break;
+        }
+      }
+    }
+    // this case will come when iteration is complete over the keys but
+    // still size is not sufficient for level file to be loaded, then we
+    // will not delete any of the keys
+    if ((currentSize - removedSize + size) > lruCacheMemorySize) {
+      toBeDeletedKeys.clear();
+    }
+    return toBeDeletedKeys;
+  }
+
+  /**
+   * @param cacheInfo
+   * @return
+   */
+  private boolean canBeRemoved(Cacheable cacheInfo) {
+    if (cacheInfo.getAccessCount() > 0) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * @param key
+   */
+  public void remove(String key) {
+    synchronized (lruCacheMap) {
+      removeKey(key);
+    }
+  }
+
+  /**
+   * This method will remove the key from lru cache
+   *
+   * @param key
+   */
+  private void removeKey(String key) {
+    Cacheable cacheable = lruCacheMap.get(key);
+    if (null != cacheable) {
+      currentSize = currentSize - cacheable.getMemorySize();
+    }
+    lruCacheMap.remove(key);
+    LOGGER.info("Removed level entry from InMemory level lru cache :: " + key);
+  }
+
+  /**
+   * This method will check if required size is available in the memory and then add
+   * the given cacheable object to the lru cache
+   *
+   * @param columnIdentifier
+   * @param cacheInfo
+   */
+  public boolean put(String columnIdentifier, Cacheable cacheInfo, long requiredSize) {
+    boolean columnKeyAddedSuccessfully = false;
+    if (freeMemorySizeForAddingCache(requiredSize)) {
+      synchronized (lruCacheMap) {
+        currentSize = currentSize + requiredSize;
+        if (null == lruCacheMap.get(columnIdentifier)) {
+          lruCacheMap.put(columnIdentifier, cacheInfo);
+        }
+        columnKeyAddedSuccessfully = true;
+      }
+      LOGGER.debug("Added level entry to InMemory level lru cache :: " + columnIdentifier);
+    } else {
+      LOGGER.error("Size not available. Column cannot be added to level lru cache :: "
+          + columnIdentifier + " .Required Size = " + requiredSize + " Size available "
+          + (lruCacheMemorySize - currentSize));
+    }
+    return columnKeyAddedSuccessfully;
+  }
+
+  /**
+   * This method will check whether a required column can be loaded into memory or not. If required,
+   * this method will call for eviction of existing data from memory
+   *
+   * @param requiredSize
+   * @return
+   */
+  private boolean freeMemorySizeForAddingCache(long requiredSize) {
+    boolean memoryAvailable = false;
+    if (lruCacheMemorySize > 0) {
+      if (isSizeAvailableToLoadColumnDictionary(requiredSize)) {
+        memoryAvailable = true;
+      } else {
+        synchronized (lruCacheMap) {
+          // get the keys that can be removed from memory
+          List<String> keysToBeRemoved = getKeysToBeRemoved(requiredSize);
+          for (String cacheKey : keysToBeRemoved) {
+            removeKey(cacheKey);
+          }
+          // after removing the keys check again if required size is available
+          if (isSizeAvailableToLoadColumnDictionary(requiredSize)) {
+            memoryAvailable = true;
+          }
+        }
+      }
+    } else {
+      memoryAvailable = true;
+    }
+    return memoryAvailable;
+  }
+
+  /**
+   * This method will check if size is available to load the dictionary into memory
+   *
+   * @param requiredSize
+   * @return
+   */
+  private boolean isSizeAvailableToLoadColumnDictionary(long requiredSize) {
+    return lruCacheMemorySize >= (currentSize + requiredSize);
+  }
+
+  /**
+   * @param key
+   * @return
+   */
+  public Cacheable get(String key) {
+    synchronized (lruCacheMap) {
+      return lruCacheMap.get(key);
+    }
+  }
+
+  /**
+   * This method will empty the level cache
+   */
+  public void clear() {
+    synchronized (lruCacheMap) {
+      lruCacheMap.clear();
+    }
+  }
+}

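A hedged walkthrough of the eviction arithmetic (hypothetical class name LruSketch and hypothetical property name carbon.sketch.lru.size; assumes that property is unset so the default of 1 MB applies, and that the CarbonData core classes and their logging dependency are on the classpath). With a 1 MB = 1048576 byte budget, a second 700000-byte entry does not fit, so the first entry, whose access count is zero, is evicted:

    import org.apache.carbondata.core.cache.Cacheable;
    import org.apache.carbondata.core.cache.CarbonLRUCache;

    public class LruSketch {
      // fixed-size stub entry; an access count of zero makes it evictable
      static Cacheable entry(final long size) {
        return new Cacheable() {
          @Override public long getFileTimeStamp() { return 0L; }
          @Override public int getAccessCount() { return 0; }
          @Override public long getMemorySize() { return size; }
        };
      }

      public static void main(String[] args) {
        CarbonLRUCache cache = new CarbonLRUCache("carbon.sketch.lru.size", "1");
        cache.put("colA", entry(700000L), 700000L);   // fits: 700000 <= 1048576
        cache.put("colB", entry(700000L), 700000L);   // forces eviction of colA
        System.out.println("colA after eviction: " + cache.get("colA")); // null
      }
    }
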
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java
new file mode 100644
index 0000000..a62695c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java
@@ -0,0 +1,279 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+import java.nio.charset.Charset;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+/**
+ * class that implements cacheable interface and methods specific to column dictionary
+ */
+public abstract class AbstractColumnDictionaryInfo implements DictionaryInfo {
+
+  /**
+   * list that will hold all the dictionary chunks for one column
+   */
+  protected List<List<byte[]>> dictionaryChunks = new CopyOnWriteArrayList<>();
+
+  /**
+   * minimum value of a surrogate key; surrogate keys for dictionary values start from 1
+   */
+  protected static final int MINIMUM_SURROGATE_KEY = 1;
+
+  /**
+   * atomic integer to maintain the access count for a column access
+   */
+  protected AtomicInteger accessCount = new AtomicInteger();
+
+  /**
+   * file timestamp
+   */
+  protected long fileTimeStamp;
+
+  /**
+   * offset till where file is read
+   */
+  protected long offsetTillFileIsRead;
+
+  /**
+   * length of dictionary metadata file
+   */
+  private long dictionaryMetaFileLength;
+
+  /**
+   * This method will return the timestamp of the file, based on which the
+   * decision will be taken whether to read that file or not
+   *
+   * @return
+   */
+  @Override public long getFileTimeStamp() {
+    return fileTimeStamp;
+  }
+
+  /**
+   * This method will return the access count for a column, based on which a decision will be taken
+   * whether to keep the object in memory
+   *
+   * @return
+   */
+  @Override public int getAccessCount() {
+    return accessCount.get();
+  }
+
+  /**
+   * This method will return the memory size of a column
+   *
+   * @return
+   */
+  @Override public long getMemorySize() {
+    return offsetTillFileIsRead;
+  }
+
+  /**
+   * This method will increment the access count for a column by 1
+   * whenever a column is getting used in query or incremental data load
+   */
+  @Override public void incrementAccessCount() {
+    accessCount.incrementAndGet();
+  }
+
+  /**
+   * This method will decrement the access count for a column by 1
+   * whenever a column usage is complete
+   */
+  private void decrementAccessCount() {
+    if (accessCount.get() > 0) {
+      accessCount.decrementAndGet();
+    }
+  }
+
+  /**
+   * This method will update the end offset of the file every time a file is read
+   *
+   * @param offsetTillFileIsRead
+   */
+  @Override public void setOffsetTillFileIsRead(long offsetTillFileIsRead) {
+    this.offsetTillFileIsRead = offsetTillFileIsRead;
+  }
+
+  /**
+   * This method will update the timestamp of a file if a file is modified
+   * as in the case of an incremental load
+   *
+   * @param fileTimeStamp
+   */
+  @Override public void setFileTimeStamp(long fileTimeStamp) {
+    this.fileTimeStamp = fileTimeStamp;
+  }
+
+  /**
+   * This method returns the list of dictionary chunks of a column.
+   * Application scenario:
+   * preparing the column sort info while writing the sort index file.
+   *
+   * @return
+   */
+  @Override public DictionaryChunksWrapper getDictionaryChunks() {
+    DictionaryChunksWrapper chunksWrapper = new DictionaryChunksWrapper(dictionaryChunks);
+    return chunksWrapper;
+  }
+
+  /**
+   * This method will release the objects and set default value for primitive types
+   */
+  @Override public void clear() {
+    decrementAccessCount();
+  }
+
+  /**
+   * This method will find and return the sort index for a given dictionary id.
+   * Applicable scenarios:
+   * 1. Used in case of order by queries when data sorting is required
+   *
+   * @param surrogateKey a unique ID for a dictionary value
+   * @return if found returns key else 0
+   */
+  @Override public int getSortedIndex(int surrogateKey) {
+    return 0;
+  }
+
+  /**
+   * dictionary metadata file length which will be set whenever we reload dictionary
+   * data from disk
+   *
+   * @param dictionaryMetaFileLength length of dictionary metadata file
+   */
+  @Override public void setDictionaryMetaFileLength(long dictionaryMetaFileLength) {
+    this.dictionaryMetaFileLength = dictionaryMetaFileLength;
+  }
+
+  /**
+   * Dictionary meta file length which will be read to check whether the length of the
+   * dictionary meta file has been modified
+   *
+   * @return
+   */
+  @Override public long getDictionaryMetaFileLength() {
+    return dictionaryMetaFileLength;
+  }
+
+  /**
+   * This method will find and return the dictionary value from sorted index.
+   * Applicable scenarios:
+   * 1. Query final result preparation in case of order by queries:
+   * while converting the final result, which will be a surrogate key,
+   * back to the original dictionary values, this method will be used
+   *
+   * @param sortedIndex sort index of dictionary value
+   * @return value if found else null
+   */
+  @Override public String getDictionaryValueFromSortedIndex(int sortedIndex) {
+    return null;
+  }
+
+  /**
+   * This method will set the sort order index of a dictionary column.
+   * Sort order index is the index of dictionary values after they are sorted.
+   *
+   * @param sortOrderIndex
+   */
+  @Override public void setSortOrderIndex(List<Integer> sortOrderIndex) {
+  }
+
+  /**
+   * This method will set the sort reverse index of a dictionary column.
+   * Sort reverse index is the index of dictionary values before they are sorted.
+   *
+   * @param sortReverseOrderIndex
+   */
+  @Override public void setSortReverseOrderIndex(List<Integer> sortReverseOrderIndex) {
+  }
+
+  /**
+   * This method will find and return the dictionary value for a given surrogate key.
+   * Applicable scenarios:
+   * 1. Query final result preparation: while converting the final result, which will
+   * be a surrogate key, back to the original dictionary values, this method will be used
+   *
+   * @param surrogateKey a unique ID for a dictionary value
+   * @return value if found else null
+   */
+  @Override public String getDictionaryValueForKey(int surrogateKey) {
+    String dictionaryValue = null;
+    if (surrogateKey < MINIMUM_SURROGATE_KEY) {
+      return dictionaryValue;
+    }
+    byte[] dictionaryValueInBytes = getDictionaryBytesFromSurrogate(surrogateKey);
+    if (null != dictionaryValueInBytes) {
+      dictionaryValue = new String(dictionaryValueInBytes,
+          Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
+    }
+    return dictionaryValue;
+  }
+
+  /**
+   * This method will find and return the dictionary value as byte array for a
+   * given surrogate key
+   *
+   * @param surrogateKey
+   * @return
+   */
+  protected byte[] getDictionaryBytesFromSurrogate(int surrogateKey) {
+    byte[] dictionaryValueInBytes = null;
+    int totalSizeOfDictionaryChunksTraversed = 0;
+    for (List<byte[]> oneDictionaryChunk : dictionaryChunks) {
+      totalSizeOfDictionaryChunksTraversed =
+          totalSizeOfDictionaryChunksTraversed + oneDictionaryChunk.size();
+      // skip the dictionary chunk till surrogate key is lesser than size of
+      // dictionary chunks traversed
+      if (totalSizeOfDictionaryChunksTraversed < surrogateKey) {
+        continue;
+      }
+      // lets say surrogateKey = 26, total size traversed is 28, dictionary chunk size = 12
+      // then surrogate position in dictionary chunk list is = 26 - (28-12) - 1 = 9
+      // -1 because list index starts from 0
+      int surrogatePositionInDictionaryChunk =
+          surrogateKey - (totalSizeOfDictionaryChunksTraversed - oneDictionaryChunk.size()) - 1;
+      dictionaryValueInBytes = oneDictionaryChunk.get(surrogatePositionInDictionaryChunk);
+      break;
+    }
+    return dictionaryValueInBytes;
+  }
+
+  /**
+   * This method will find and return the surrogate key for a given dictionary value
+   * Applicable scenario:
+   * 1. Incremental data load: a dictionary will not be generated for existing values. For
+   * those, the values have to be looked up in the existing dictionary cache.
+   * 2. Filter scenarios where the surrogate key has to be found from the value.
+   *
+   * @param value dictionary value
+   * @return if found returns key else 0
+   */
+  @Override public int getSurrogateKey(String value) {
+    byte[] keyData = value.getBytes(Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
+    return getSurrogateKey(keyData);
+  }
+}
+

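The surrogate-to-chunk arithmetic in getDictionaryBytesFromSurrogate is easy to get wrong by one, so here it is restated as a standalone sketch (hypothetical class name SurrogateLookupSketch) with the worked example from the comment above:

    import java.util.ArrayList;
    import java.util.List;

    public class SurrogateLookupSketch {
      static byte[] lookup(List<List<byte[]>> chunks, int surrogateKey) {
        int traversed = 0;
        for (List<byte[]> chunk : chunks) {
          traversed += chunk.size();
          if (traversed < surrogateKey) {
            continue; // surrogate lies in a later chunk
          }
          // e.g. surrogateKey=26, traversed=28, chunk size=12 -> 26-(28-12)-1 = 9
          return chunk.get(surrogateKey - (traversed - chunk.size()) - 1);
        }
        return null; // surrogate beyond all loaded chunks
      }

      public static void main(String[] args) {
        List<List<byte[]>> chunks = new ArrayList<List<byte[]>>();
        chunks.add(fill(16)); // surrogates 1..16
        chunks.add(fill(12)); // surrogates 17..28
        // surrogate 26 resolves to index 9 of the second chunk
        System.out.println(new String(lookup(chunks, 26)));
      }

      static List<byte[]> fill(int n) {
        List<byte[]> chunk = new ArrayList<byte[]>();
        for (int i = 0; i < n; i++) {
          chunk.add(("v" + i).getBytes());
        }
        return chunk;
      }
    }
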
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractDictionaryCache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractDictionaryCache.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractDictionaryCache.java
new file mode 100644
index 0000000..01dd269
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractDictionaryCache.java
@@ -0,0 +1,297 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.carbondata.common.factory.CarbonCommonFactory;
+import org.apache.carbondata.core.cache.Cache;
+import org.apache.carbondata.core.cache.CacheType;
+import org.apache.carbondata.core.cache.CarbonLRUCache;
+import org.apache.carbondata.core.carbon.path.CarbonTablePath;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFile;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+import org.apache.carbondata.core.reader.CarbonDictionaryColumnMetaChunk;
+import org.apache.carbondata.core.reader.CarbonDictionaryMetadataReader;
+import org.apache.carbondata.core.service.DictionaryService;
+import org.apache.carbondata.core.service.PathService;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.CarbonUtilException;
+
+/**
+ * Abstract class which implements methods common to reverse and forward dictionary cache
+ */
+public abstract class AbstractDictionaryCache<K extends DictionaryColumnUniqueIdentifier,
+    V extends Dictionary>
+    implements Cache<DictionaryColumnUniqueIdentifier, Dictionary> {
+
+  /**
+   * thread pool size to be used for dictionary data reading
+   */
+  protected int thread_pool_size;
+
+  /**
+   * LRU cache variable
+   */
+  protected CarbonLRUCache carbonLRUCache;
+
+  /**
+   * carbon store path
+   */
+  protected String carbonStorePath;
+
+  /**
+   * @param carbonStorePath
+   * @param carbonLRUCache
+   */
+  public AbstractDictionaryCache(String carbonStorePath, CarbonLRUCache carbonLRUCache) {
+    this.carbonStorePath = carbonStorePath;
+    this.carbonLRUCache = carbonLRUCache;
+    initThreadPoolSize();
+  }
+
+  /**
+   * This method will initialize the thread pool size to be used for creating the
+   * max number of threads for a job
+   */
+  private void initThreadPoolSize() {
+    try {
+      thread_pool_size = Integer.parseInt(CarbonProperties.getInstance()
+          .getProperty(CarbonCommonConstants.NUM_CORES_LOADING,
+              CarbonCommonConstants.NUM_CORES_DEFAULT_VAL));
+    } catch (NumberFormatException e) {
+      thread_pool_size = Integer.parseInt(CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
+    }
+  }
+
+  /**
+   * This method will check if the dictionary file and its metadata file exist for a given column
+   *
+   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
+   *                                         tableName and columnIdentifier
+   * @return
+   */
+  protected boolean isFileExistsForGivenColumn(
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
+    PathService pathService = CarbonCommonFactory.getPathService();
+    CarbonTablePath carbonTablePath = pathService
+        .getCarbonTablePath(dictionaryColumnUniqueIdentifier.getColumnIdentifier(), carbonStorePath,
+            dictionaryColumnUniqueIdentifier.getCarbonTableIdentifier());
+
+    String dictionaryFilePath =
+        carbonTablePath.getDictionaryFilePath(dictionaryColumnUniqueIdentifier
+            .getColumnIdentifier().getColumnId());
+    String dictionaryMetadataFilePath =
+        carbonTablePath.getDictionaryMetaFilePath(dictionaryColumnUniqueIdentifier
+            .getColumnIdentifier().getColumnId());
+    // check if both dictionary and its metadata file exists for a given column
+    return CarbonUtil.isFileExists(dictionaryFilePath) && CarbonUtil
+        .isFileExists(dictionaryMetadataFilePath);
+  }
+
+  /**
+   * This method will read the dictionary metadata file and return its last meta chunk
+   *
+   * @param dictionaryColumnUniqueIdentifier
+   * @return the last dictionary metadata chunk
+   * @throws IOException read and close method throws IO exception
+   */
+  protected CarbonDictionaryColumnMetaChunk readLastChunkFromDictionaryMetadataFile(
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) throws IOException {
+    DictionaryService dictService = CarbonCommonFactory.getDictionaryService();
+    CarbonDictionaryMetadataReader columnMetadataReaderImpl = dictService
+        .getDictionaryMetadataReader(dictionaryColumnUniqueIdentifier.getCarbonTableIdentifier(),
+            dictionaryColumnUniqueIdentifier.getColumnIdentifier(), carbonStorePath);
+
+    CarbonDictionaryColumnMetaChunk carbonDictionaryColumnMetaChunk = null;
+    // read metadata file
+    try {
+      carbonDictionaryColumnMetaChunk =
+          columnMetadataReaderImpl.readLastEntryOfDictionaryMetaChunk();
+    } finally {
+      // close the metadata reader
+      columnMetadataReaderImpl.close();
+    }
+    return carbonDictionaryColumnMetaChunk;
+  }
+
+  /**
+   * This method will validate the dictionary metadata file for any modification
+   *
+   * @param carbonFile    dictionary metadata file to validate
+   * @param fileTimeStamp last known modification time of the file
+   * @param endOffset     last known length (end offset) of the file
+   * @return true if the file has been modified since it was last read
+   */
+  private boolean isDictionaryMetaFileModified(CarbonFile carbonFile, long fileTimeStamp,
+      long endOffset) {
+    return carbonFile.isFileModified(fileTimeStamp, endOffset);
+  }
+
+  /**
+   * This method will return the carbon file object based on its type (local, HDFS)
+   *
+   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
+   *                                         tableName and columnIdentifier
+   * @return carbon file object for the dictionary metadata file
+   */
+  private CarbonFile getDictionaryMetaCarbonFile(
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
+    PathService pathService = CarbonCommonFactory.getPathService();
+    CarbonTablePath carbonTablePath = pathService
+        .getCarbonTablePath(dictionaryColumnUniqueIdentifier.getColumnIdentifier(), carbonStorePath,
+            dictionaryColumnUniqueIdentifier.getCarbonTableIdentifier());
+    String dictionaryMetaFilePath =
+        carbonTablePath.getDictionaryMetaFilePath(dictionaryColumnUniqueIdentifier
+            .getColumnIdentifier().getColumnId());
+    FileFactory.FileType fileType = FileFactory.getFileType(dictionaryMetaFilePath);
+    return FileFactory.getCarbonFile(dictionaryMetaFilePath, fileType);
+  }
+
+  /**
+   * This method will get the value for the given key. If value does not exist
+   * for the given key, it will check and load the value.
+   *
+   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
+   *                                         tableName and columnIdentifier
+   * @param dictionaryInfo                   holds dictionary information and data
+   * @param lruCacheKey                      key against which dictionary info is stored in LRU cache
+   * @param loadSortIndex                    read and load sort index file in memory
+   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
+   */
+  protected void checkAndLoadDictionaryData(
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier,
+      DictionaryInfo dictionaryInfo, String lruCacheKey, boolean loadSortIndex)
+      throws CarbonUtilException {
+    try {
+      // read last segment dictionary meta chunk entry to get the end offset of file
+      CarbonFile carbonFile = getDictionaryMetaCarbonFile(dictionaryColumnUniqueIdentifier);
+      boolean dictionaryMetaFileModified =
+          isDictionaryMetaFileModified(carbonFile, dictionaryInfo.getFileTimeStamp(),
+              dictionaryInfo.getDictionaryMetaFileLength());
+      // read the last entry from the dictionary meta file only if the dictionary
+      // metadata file has been modified
+      if (dictionaryMetaFileModified) {
+        synchronized (dictionaryInfo) {
+          carbonFile = getDictionaryMetaCarbonFile(dictionaryColumnUniqueIdentifier);
+          dictionaryMetaFileModified =
+              isDictionaryMetaFileModified(carbonFile, dictionaryInfo.getFileTimeStamp(),
+                  dictionaryInfo.getDictionaryMetaFileLength());
+          // double check: read the last entry from the dictionary meta file only if
+          // the dictionary metadata file has been modified
+          if (dictionaryMetaFileModified) {
+            CarbonDictionaryColumnMetaChunk carbonDictionaryColumnMetaChunk =
+                readLastChunkFromDictionaryMetadataFile(dictionaryColumnUniqueIdentifier);
+            // required size will be the total size of the file minus the offset
+            // till which the file has already been read
+            long requiredSize =
+                carbonDictionaryColumnMetaChunk.getEnd_offset() - dictionaryInfo.getMemorySize();
+            if (requiredSize > 0) {
+              boolean columnAddedToLRUCache =
+                  carbonLRUCache.put(lruCacheKey, dictionaryInfo, requiredSize);
+              // if column is successfully added to lru cache then only load the
+              // dictionary data
+              if (columnAddedToLRUCache) {
+                // load dictionary data
+                loadDictionaryData(dictionaryInfo, dictionaryColumnUniqueIdentifier,
+                    dictionaryInfo.getMemorySize(), carbonDictionaryColumnMetaChunk.getEnd_offset(),
+                    loadSortIndex);
+                // set the end offset till where file is read
+                dictionaryInfo
+                    .setOffsetTillFileIsRead(carbonDictionaryColumnMetaChunk.getEnd_offset());
+                dictionaryInfo.setFileTimeStamp(carbonFile.getLastModifiedTime());
+                dictionaryInfo.setDictionaryMetaFileLength(carbonFile.getSize());
+              } else {
+                throw new CarbonUtilException(
+                    "Cannot load dictionary into memory. Not enough memory available");
+              }
+            }
+          }
+        }
+      }
+      // increment the column access count
+      incrementDictionaryAccessCount(dictionaryInfo);
+    } catch (IOException e) {
+      throw new CarbonUtilException(e.getMessage());
+    }
+  }
+
+  /**
+   * This method will prepare the lru cache key and return the same
+   *
+   * @param columnIdentifier unique identifier of the column
+   * @param cacheType        cache type whose name is appended to the key
+   * @return lru cache key
+   */
+  protected String getLruCacheKey(String columnIdentifier, CacheType cacheType) {
+    String lruCacheKey =
+        columnIdentifier + CarbonCommonConstants.UNDERSCORE + cacheType.getCacheName();
+    return lruCacheKey;
+  }
+
+  /**
+   * This method will check and load the dictionary file in memory for a given column
+   *
+   * @param dictionaryInfo                   holds dictionary information and data
+   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
+   *                                         tableName and columnIdentifier
+   * @param dictionaryChunkStartOffset       start offset from where dictionary file has to
+   *                                         be read
+   * @param dictionaryChunkEndOffset         end offset till where dictionary file has to
+   *                                         be read
+   * @param loadSortIndex                    read and load sort index file in memory
+   * @throws IOException if an I/O error occurs while reading the dictionary file
+   */
+  private void loadDictionaryData(DictionaryInfo dictionaryInfo,
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier,
+      long dictionaryChunkStartOffset, long dictionaryChunkEndOffset, boolean loadSortIndex)
+      throws IOException {
+    DictionaryCacheLoader dictionaryCacheLoader =
+        new DictionaryCacheLoaderImpl(dictionaryColumnUniqueIdentifier.getCarbonTableIdentifier(),
+            carbonStorePath);
+    dictionaryCacheLoader
+        .load(dictionaryInfo, dictionaryColumnUniqueIdentifier.getColumnIdentifier(),
+            dictionaryChunkStartOffset, dictionaryChunkEndOffset, loadSortIndex);
+  }
+
+  /**
+   * This method will increment the access count for a given dictionary column
+   *
+   * @param dictionaryInfo
+   */
+  protected void incrementDictionaryAccessCount(DictionaryInfo dictionaryInfo) {
+    dictionaryInfo.incrementAccessCount();
+  }
+
+  /**
+   * This method will update the dictionary access count, which is required for its
+   * removal from the column LRU cache
+   *
+   * @param dictionaryList
+   */
+  protected void clearDictionary(List<Dictionary> dictionaryList) {
+    for (Dictionary dictionary : dictionaryList) {
+      dictionary.clear();
+    }
+  }
+}

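The checkAndLoadDictionaryData method above relies on a double-checked pattern: the modification test runs once without a lock and again inside the synchronized block, so only one thread re-reads the metadata and loads the new dictionary chunk, while late arrivals see the updated timestamp and length and skip the work. A minimal standalone sketch of that idiom; the class and method names here are hypothetical placeholders, not part of this commit:

    // Sketch only: hypothetical names, using java.io.File in place of CarbonFile.
    final class DictionaryReloader {
      private long knownTimeStamp;   // last observed modification time
      private long knownLength;      // last observed file length

      void refreshIfModified(java.io.File metaFile) {
        if (isModified(metaFile)) {          // cheap first check, no lock held
          synchronized (this) {
            if (isModified(metaFile)) {      // re-check under the lock
              // ... read the newly appended tail of the dictionary here ...
              knownTimeStamp = metaFile.lastModified();
              knownLength = metaFile.length();
            }
          }
        }
      }

      private boolean isModified(java.io.File f) {
        return f.lastModified() != knownTimeStamp || f.length() != knownLength;
      }
    }

The second check is what makes the pattern safe: another thread may complete the reload between the unsynchronized test and lock acquisition, and without the re-check every waiting thread would reload the same chunk again.
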
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ColumnDictionaryInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ColumnDictionaryInfo.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ColumnDictionaryInfo.java
new file mode 100644
index 0000000..08d9bef
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ColumnDictionaryInfo.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+import java.nio.charset.Charset;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.util.ByteUtil;
+import org.apache.carbondata.core.util.CarbonProperties;
+
+/**
+ * class that implements methods specific for dictionary data lookup
+ */
+public class ColumnDictionaryInfo extends AbstractColumnDictionaryInfo {
+
+  /**
+   * index after members are sorted
+   */
+  private AtomicReference<List<Integer>> sortOrderReference =
+      new AtomicReference<List<Integer>>(new ArrayList<Integer>());
+
+  /**
+   * inverted index to retrieve the member
+   */
+  private AtomicReference<List<Integer>> sortReverseOrderReference =
+      new AtomicReference<List<Integer>>(new ArrayList<Integer>());
+
+  private DataType dataType;
+
+  public ColumnDictionaryInfo(DataType dataType) {
+    this.dataType = dataType;
+  }
+
+  /**
+   * This method will find and return the surrogate key for a given dictionary value
+   * Applicable scenarios:
+   * 1. Incremental data load: a dictionary will not be generated for existing values, so
+   * those values have to be looked up in the existing dictionary cache.
+   * 2. Filter scenarios where the surrogate key has to be found from the value.
+   *
+   * @param value dictionary value as byte array
+   * @return if found returns key else 0
+   */
+  @Override public int getSurrogateKey(byte[] value) {
+    return getSurrogateKeyFromDictionaryValue(value);
+  }
+
+  /**
+   * This method will find and return the sort index for a given dictionary id.
+   * Applicable scenarios:
+   * 1. Used in case of order by queries when data sorting is required
+   *
+   * @param surrogateKey a unique ID for a dictionary value
+   * @return if found returns sort index else -1
+   */
+  @Override public int getSortedIndex(int surrogateKey) {
+    if (surrogateKey > sortReverseOrderReference.get().size()
+        || surrogateKey < MINIMUM_SURROGATE_KEY) {
+      return -1;
+    }
+    // decrement the surrogate key to get the list index: surrogate keys start
+    // from 1 while list indices start from 0
+    return sortReverseOrderReference.get().get(surrogateKey - 1);
+  }
+
+  /**
+   * This method will find and return the dictionary value from sorted index.
+   * Applicable scenarios:
+   * 1. Query final result preparation in case of order by queries:
+   * this method will be used while converting the final result, which contains
+   * surrogate keys, back to the original dictionary values
+   *
+   * @param sortedIndex sort index of dictionary value
+   * @return value if found else null
+   */
+  @Override public String getDictionaryValueFromSortedIndex(int sortedIndex) {
+    if (sortedIndex > sortReverseOrderReference.get().size()
+        || sortedIndex < MINIMUM_SURROGATE_KEY) {
+      return null;
+    }
+    // decrement the sorted index to get the list index: surrogate keys and sort
+    // indices start from 1 while list indices start from 0
+    int surrogateKey = sortOrderReference.get().get(sortedIndex - 1);
+    return getDictionaryValueForKey(surrogateKey);
+  }
+
+  /**
+   * This method will add a new dictionary chunk to existing list of dictionary chunks
+   *
+   * @param dictionaryChunk
+   */
+  @Override public void addDictionaryChunk(List<byte[]> dictionaryChunk) {
+    dictionaryChunks.add(dictionaryChunk);
+  }
+
+  /**
+   * This method will set the sort order index of a dictionary column.
+   * Sort order index is the index of dictionary values after they are sorted.
+   *
+   * @param sortOrderIndex
+   */
+  @Override public void setSortOrderIndex(List<Integer> sortOrderIndex) {
+    sortOrderReference.set(sortOrderIndex);
+  }
+
+  /**
+   * This method will set the sort reverse index of a dictionary column.
+   * Sort reverse index is the index of dictionary values before they are sorted.
+   *
+   * @param sortReverseOrderIndex
+   */
+  @Override public void setSortReverseOrderIndex(List<Integer> sortReverseOrderIndex) {
+    sortReverseOrderReference.set(sortReverseOrderIndex);
+  }
+
+  /**
+   * This method will apply binary search logic to find the surrogate key for the
+   * given value
+   *
+   * @param key dictionary value to be searched, as a byte array
+   * @return surrogate key if found, else 0
+   */
+  private int getSurrogateKeyFromDictionaryValue(byte[] key) {
+    String filterKey = new String(key, Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
+    int low = 0;
+    List<Integer> sortedSurrogates = sortOrderReference.get();
+    int high = sortedSurrogates.size() - 1;
+    while (low <= high) {
+      int mid = (low + high) >>> 1;
+      int surrogateKey = sortedSurrogates.get(mid);
+      byte[] dictionaryValue = getDictionaryBytesFromSurrogate(surrogateKey);
+      int cmp = -1;
+      if (this.getDataType() != DataType.STRING) {
+        cmp = compareFilterKeyWithDictionaryKey(
+            new String(dictionaryValue, Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET)),
+            filterKey, this.getDataType());
+      } else {
+        cmp = ByteUtil.UnsafeComparer.INSTANCE.compareTo(dictionaryValue, key);
+      }
+      if (cmp < 0) {
+        low = mid + 1;
+      } else if (cmp > 0) {
+        high = mid - 1;
+      } else {
+        return surrogateKey; // key found
+      }
+    }
+    return 0;
+  }
+
+  /**
+   * This method will apply binary search logic to find the surrogate keys for the
+   * given list of filter member values
+   *
+   * @param byteValuesOfFilterMembers filter member values to be searched, as byte arrays
+   * @param surrogates                list to which the matching surrogate keys are added
+   */
+  public void getIncrementalSurrogateKeyFromDictionary(List<byte[]> byteValuesOfFilterMembers,
+      List<Integer> surrogates) {
+    List<Integer> sortedSurrogates = sortOrderReference.get();
+    int low = 0;
+    for (byte[] byteValueOfFilterMember : byteValuesOfFilterMembers) {
+      String filterKey = new String(byteValueOfFilterMember,
+          Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
+      if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(filterKey)) {
+        surrogates.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL_SURROGATE_KEY);
+        continue;
+      }
+      int high = sortedSurrogates.size() - 1;
+      while (low <= high) {
+        int mid = (low + high) >>> 1;
+        int surrogateKey = sortedSurrogates.get(mid);
+        byte[] dictionaryValue = getDictionaryBytesFromSurrogate(surrogateKey);
+        int cmp = -1;
+        //fortify fix
+        if (null == dictionaryValue) {
+          cmp = -1;
+        } else if (this.getDataType() != DataType.STRING) {
+          cmp = compareFilterKeyWithDictionaryKey(
+              new String(dictionaryValue, Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET)),
+              filterKey, this.getDataType());
+        } else {
+          cmp =
+              ByteUtil.UnsafeComparer.INSTANCE.compareTo(dictionaryValue, byteValueOfFilterMember);
+        }
+        if (cmp < 0) {
+          low = mid + 1;
+        } else if (cmp > 0) {
+          high = mid - 1;
+        } else {
+          surrogates.add(surrogateKey);
+          low = mid;
+          break;
+        }
+      }
+    }
+    //Default value has to be added
+    if (surrogates.isEmpty()) {
+      surrogates.add(0);
+    }
+  }
+
+  private int compareFilterKeyWithDictionaryKey(String dictionaryVal, String memberVal,
+      DataType dataType) {
+    try {
+      switch (dataType) {
+        case SHORT:
+          return Short.compare((Short.parseShort(dictionaryVal)), (Short.parseShort(memberVal)));
+        case INT:
+          return Integer.compare((Integer.parseInt(dictionaryVal)), (Integer.parseInt(memberVal)));
+        case DOUBLE:
+          return Double
+              .compare((Double.parseDouble(dictionaryVal)), (Double.parseDouble(memberVal)));
+        case LONG:
+          return Long.compare((Long.parseLong(dictionaryVal)), (Long.parseLong(memberVal)));
+        case BOOLEAN:
+          return Boolean
+              .compare((Boolean.parseBoolean(dictionaryVal)), (Boolean.parseBoolean(memberVal)));
+        case TIMESTAMP:
+          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
+              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
+          Date dateToStr;
+          Date dictionaryDate;
+          dateToStr = parser.parse(memberVal);
+          dictionaryDate = parser.parse(dictionaryVal);
+          return dictionaryDate.compareTo(dateToStr);
+        case DECIMAL:
+          java.math.BigDecimal javaDecValForDictVal = new java.math.BigDecimal(dictionaryVal);
+          java.math.BigDecimal javaDecValForMemberVal = new java.math.BigDecimal(memberVal);
+          return javaDecValForDictVal.compareTo(javaDecValForMemberVal);
+        default:
+          return -1;
+      }
+    } catch (Exception e) {
+      // In all data types except String, the null member will be the highest while
+      // searching the dictionary. When the member comparison happens with a filter
+      // member that is also a null member, parsing fails for every data type except
+      // String, so an explicit comparison is required: if both are null members, return 0.
+      if (memberVal.equals(dictionaryVal)) {
+        return 0;
+      }
+      return 1;
+    }
+  }
+
+  /**
+   * @return the data type of this dictionary column
+   */
+  public DataType getDataType() {
+    return dataType;
+  }
+
+}

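The binary search in getSurrogateKeyFromDictionaryValue above never scans the dictionary directly; it walks the sort-order index (surrogate keys arranged by their values) and dereferences each probe back to its bytes before comparing. A self-contained sketch of that lookup, assuming for illustration a plain List<byte[]> in which the value for surrogate key k sits at index k - 1; all names here are hypothetical:

    import java.util.List;

    // Sketch only: mirrors the sort-order binary search above.
    final class SortedDictionaryLookup {
      private final List<Integer> sortOrder;  // surrogate keys sorted by value
      private final List<byte[]> values;      // value of surrogate key k at index k - 1

      SortedDictionaryLookup(List<Integer> sortOrder, List<byte[]> values) {
        this.sortOrder = sortOrder;
        this.values = values;
      }

      // Returns the surrogate key for the given value, or 0 if absent
      // (the same "not found" convention as getSurrogateKey above).
      int surrogateOf(byte[] key) {
        int low = 0;
        int high = sortOrder.size() - 1;
        while (low <= high) {
          int mid = (low + high) >>> 1;       // overflow-safe midpoint
          int surrogate = sortOrder.get(mid);
          int cmp = compareUnsigned(values.get(surrogate - 1), key);
          if (cmp < 0) {
            low = mid + 1;
          } else if (cmp > 0) {
            high = mid - 1;
          } else {
            return surrogate;                 // key found
          }
        }
        return 0;
      }

      // Lexicographic comparison of unsigned bytes, in the spirit of
      // ByteUtil.UnsafeComparer.
      private static int compareUnsigned(byte[] a, byte[] b) {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
          int c = (a[i] & 0xff) - (b[i] & 0xff);
          if (c != 0) {
            return c;
          }
        }
        return a.length - b.length;
      }
    }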

[17/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefaultLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefaultLong.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefaultLong.java
deleted file mode 100644
index d37cb46..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefaultLong.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.datastorage.store.compression.type;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public class UnCompressMaxMinDefaultLong extends UnCompressMaxMinLong {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressMaxMinDefaultLong.class.getName());
-  private static Compressor<long[]> longCompressor =
-      SnappyCompression.SnappyLongCompression.INSTANCE;
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException ex5) {
-      LOGGER.error(ex5, ex5.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressMaxMinByteForLong byte1 = new UnCompressMaxMinByteForLong();
-    byte1.setValue(longCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressMaxMinByteForLong();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    long maxValue = (long) maxValueObject;
-    long[] vals = new long[value.length];
-    CarbonReadDataHolder dataHolderInfoObj = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxValue;
-      } else {
-        vals[i] = maxValue - value[i];
-      }
-
-    }
-    dataHolderInfoObj.setReadableLongValues(vals);
-    return dataHolderInfoObj;
-  }
-
-}

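The getValues method of the class removed above (and of the sibling MaxMin classes that follow) decodes a max-delta encoding: each stored entry is the block maximum minus the original value, with zero standing for the maximum itself. A standalone sketch of the decode step, with hypothetical names, not taken from the commit:

    // Sketch of max-delta decoding: stored[i] == 0 means "equal to max",
    // otherwise original = max - stored[i].
    static long[] decodeMaxMin(long[] stored, long maxValue) {
      long[] original = new long[stored.length];
      for (int i = 0; i < stored.length; i++) {
        original[i] = (stored[i] == 0) ? maxValue : maxValue - stored[i];
      }
      return original;
    }

Note that the zero branch is arithmetically redundant (max - 0 == max); it is kept here only to mirror the removed code.
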
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinFloat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinFloat.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinFloat.java
deleted file mode 100644
index 0ebf71e..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinFloat.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressMaxMinFloat implements UnCompressValue<float[]> {
-
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressMaxMinFloat.class.getName());
-  /**
-   * floatCompressor
-   */
-  private static Compressor<float[]> floatCompressor =
-      SnappyCompression.SnappyFloatCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private float[] value;
-
-  @Override public void setValue(float[] value) {
-    this.value = (float[]) value;
-
-  }
-
-  @Override public UnCompressValue getNew() {
-    try {
-      return (UnCompressValue) clone();
-    } catch (CloneNotSupportedException ex4) {
-      LOGGER.error(ex4, ex4.getMessage());
-    }
-    return null;
-  }
-
-  @Override public UnCompressValue compress() {
-
-    UnCompressMaxMinByte byte1 = new UnCompressMaxMinByte();
-    byte1.setValue(floatCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public UnCompressValue uncompress(DataType dTypeVal) {
-    return null;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToFloatArray(buffer, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public UnCompressValue getCompressorObject() {
-    return new UnCompressMaxMinByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double maxValue = (double) maxValueObject;
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder dataHolderVal = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxValue;
-      } else {
-        vals[i] = maxValue - value[i];
-      }
-
-    }
-    dataHolderVal.setReadableDoubleValues(vals);
-    return dataHolderVal;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinInt.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinInt.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinInt.java
deleted file mode 100644
index 9d47879..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinInt.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public class UnCompressMaxMinInt implements ValueCompressonHolder.UnCompressValue<int[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressMaxMinInt.class.getName());
-
-  /**
-   * intCompressor.
-   */
-  private static Compressor<int[]> intCompressor = SnappyCompression.SnappyIntCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private int[] value;
-
-  @Override public void setValue(int[] value) {
-    this.value = value;
-
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressMaxMinByte byte1 = new UnCompressMaxMinByte();
-    byte1.setValue(intCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue uncompress(
-      ValueCompressionUtil.DataType dataTypeValue) {
-    return null;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToIntArray(buffer, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressMaxMinByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decVal, Object maxValueObject) {
-    double maxValue = (double) maxValueObject;
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxValue;
-      } else {
-        vals[i] = maxValue - value[i];
-      }
-
-    }
-    dataHolder.setReadableDoubleValues(vals);
-    return dataHolder;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinLong.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinLong.java
deleted file mode 100644
index 5291dff..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinLong.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public class UnCompressMaxMinLong implements ValueCompressonHolder.UnCompressValue<long[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressMaxMinLong.class.getName());
-  /**
-   * longCompressor.
-   */
-  private static Compressor<long[]> longCompressor =
-      SnappyCompression.SnappyLongCompression.INSTANCE;
-  /**
-   * value.
-   */
-  protected long[] value;
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressMaxMinByte unCompressByte = new UnCompressMaxMinByte();
-    unCompressByte.setValue(longCompressor.compress(value));
-    return unCompressByte;
-  }
-
-  @Override public void setValue(long[] value) {
-    this.value = value;
-
-  }
-
-  @Override
-  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
-    return null;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToLongArray(buffer, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressMaxMinByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double maxValue = (double) maxValueObject;
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder data = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxValue;
-      } else {
-        vals[i] = maxValue - value[i];
-      }
-
-    }
-    data.setReadableDoubleValues(vals);
-    return data;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinShort.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinShort.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinShort.java
deleted file mode 100644
index c2951dd..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinShort.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressMaxMinShort implements ValueCompressonHolder.UnCompressValue<short[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressMaxMinShort.class.getName());
-  /**
-   * shortCompressor.
-   */
-  private static Compressor<short[]> shortCompressor =
-      SnappyCompression.SnappyShortCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private short[] value;
-
-  @Override public void setValue(short[] value) {
-    this.value = value;
-
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
-    return null;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException ex3) {
-      LOGGER.error(ex3, ex3.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-
-    UnCompressMaxMinByte byte1 = new UnCompressMaxMinByte();
-    byte1.setValue(shortCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToShortArray(buffer, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressMaxMinByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double maxValue = (double) maxValueObject;
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder carbonDataHolderObj = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxValue;
-      } else {
-        vals[i] = maxValue - value[i];
-      }
-
-    }
-    carbonDataHolderObj.setReadableDoubleValues(vals);
-    return carbonDataHolderObj;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalByte.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalByte.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalByte.java
deleted file mode 100644
index 8916fed..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalByte.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressNonDecimalByte implements ValueCompressonHolder.UnCompressValue<byte[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNonDecimalByte.class.getName());
-  /**
-   * byteCompressor.
-   */
-  private static Compressor<byte[]> byteCompressor =
-      SnappyCompression.SnappyByteCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private byte[] value;
-
-  @Override public void setValue(byte[] value) {
-    this.value = value;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressNonDecimalByte byte1 = new UnCompressNonDecimalByte();
-    byte1.setValue(byteCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
-    ValueCompressonHolder.UnCompressValue byte1 =
-        ValueCompressionUtil.unCompressNonDecimal(dataType, dataType);
-    ValueCompressonHolder.unCompress(dataType, byte1, value);
-    return byte1;
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    this.value = value;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return value;
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressNonDecimalByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      vals[i] = value[i] / Math.pow(10, decimal);
-    }
-    dataHolder.setReadableDoubleValues(vals);
-    return dataHolder;
-  }
-}

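The NonDecimal variants removed here and below all share one decode rule: values were scaled to integers at write time by multiplying with 10^decimal, so reading divides each stored entry by that power of ten. A minimal sketch, again with hypothetical names rather than anything from the commit:

    // Sketch of non-decimal decoding: original = stored / 10^decimal.
    static double[] decodeNonDecimal(byte[] stored, int decimal) {
      double divisor = Math.pow(10, decimal);
      double[] original = new double[stored.length];
      for (int i = 0; i < stored.length; i++) {
        original[i] = stored[i] / divisor;
      }
      return original;
    }
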
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalDefault.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalDefault.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalDefault.java
deleted file mode 100644
index d122e12..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalDefault.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressNonDecimalDefault
-    implements ValueCompressonHolder.UnCompressValue<double[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNonDecimalDefault.class.getName());
-  /**
-   * doubleCompressor.
-   */
-  private static Compressor<double[]> doubleCompressor =
-      SnappyCompression.SnappyDoubleCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private double[] value;
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException cnse1) {
-      LOGGER.error(cnse1, cnse1.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressNonDecimalByte byte1 = new UnCompressNonDecimalByte();
-    byte1.setValue(doubleCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
-    return null;
-  }
-
-  @Override public void setValue(double[] value) {
-    this.value = value;
-
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToDoubleArray(buffer, value.length);
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressNonDecimalByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double[] dblVals = new double[value.length];
-    for (int i = 0; i < dblVals.length; i++) {
-      dblVals[i] = value[i] / Math.pow(10, decimal);
-    }
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    dataHolder.setReadableDoubleValues(dblVals);
-    return dataHolder;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalFloat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalFloat.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalFloat.java
deleted file mode 100644
index 2081c14..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalFloat.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public class UnCompressNonDecimalFloat implements ValueCompressonHolder.UnCompressValue<float[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNonDecimalFloat.class.getName());
-  /**
-   * floatCompressor
-   */
-  private static Compressor<float[]> floatCompressor =
-      SnappyCompression.SnappyFloatCompression.INSTANCE;
-  /**
-   * value.
-   */
-
-  private float[] value;
-
-  @Override public void setValue(float[] value) {
-    this.value = value;
-
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException cnsexception) {
-      LOGGER
-          .error(cnsexception, cnsexception.getMessage());
-    }
-    return null;
-  }
-
-  public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressNonDecimalByte byte1 = new UnCompressNonDecimalByte();
-    byte1.setValue(floatCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override
-  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
-    return null;
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToFloatArray(buffer, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressNonDecimalByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double[] vals = new double[value.length];
-    for (int m = 0; m < vals.length; m++) {
-      vals[m] = value[m] / Math.pow(10, decimal);
-    }
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    dataHolder.setReadableDoubleValues(vals);
-    return dataHolder;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalInt.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalInt.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalInt.java
deleted file mode 100644
index c7143eb..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalInt.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressNonDecimalInt implements UnCompressValue<int[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNonDecimalInt.class.getName());
-  /**
-   * intCompressor.
-   */
-  private static Compressor<int[]> intCompressor = SnappyCompression.SnappyIntCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private int[] value;
-
-  @Override public void setValue(int[] value) {
-    this.value = (int[]) value;
-
-  }
-
-  @Override public UnCompressValue getNew() {
-    try {
-      return (UnCompressValue) clone();
-    } catch (CloneNotSupportedException csne1) {
-      LOGGER.error(csne1, csne1.getMessage());
-    }
-    return null;
-  }
-
-  @Override public UnCompressValue compress() {
-    UnCompressNonDecimalByte byte1 = new UnCompressNonDecimalByte();
-    byte1.setValue(intCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public void setValueInBytes(byte[] bytesArr) {
-    ByteBuffer buffer = ByteBuffer.wrap(bytesArr);
-    this.value = ValueCompressionUtil.convertToIntArray(buffer, bytesArr.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public UnCompressValue getCompressorObject() {
-    return new UnCompressNonDecimalByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double[] vals = new double[value.length];
-    for (int k = 0; k < vals.length; k++) {
-      vals[k] = value[k] / Math.pow(10, decimal);
-    }
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    dataHolder.setReadableDoubleValues(vals);
-    return dataHolder;
-  }
-
-  @Override public UnCompressValue uncompress(DataType dataType) {
-    return null;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalLong.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalLong.java
deleted file mode 100644
index 4adb1ee..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalLong.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressNonDecimalLong implements UnCompressValue<long[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNonDecimalLong.class.getName());
-
-  /**
-   * longCompressor.
-   */
-  private static Compressor<long[]> longCompressor =
-      SnappyCompression.SnappyLongCompression.INSTANCE;
-
-  /**
-   * value.
-   */
-  private long[] value;
-
-  @Override public void setValue(long[] value) {
-    this.value = value;
-  }
-
-  @Override public UnCompressValue compress() {
-    UnCompressNonDecimalByte byte1 = new UnCompressNonDecimalByte();
-    byte1.setValue(longCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public UnCompressValue getNew() {
-    try {
-      return (UnCompressValue) clone();
-    } catch (CloneNotSupportedException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return null;
-  }
-
-  @Override public UnCompressValue uncompress(DataType dataType) {
-    return null;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public void setValueInBytes(byte[] bytes) {
-    ByteBuffer buffer = ByteBuffer.wrap(bytes);
-    this.value = ValueCompressionUtil.convertToLongArray(buffer, bytes.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public UnCompressValue getCompressorObject() {
-    return new UnCompressNonDecimalByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double[] vals = new double[value.length];
-    for (int i = 0; i < vals.length; i++) {
-      vals[i] = value[i] / Math.pow(10, decimal);
-    }
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    dataHolder.setReadableDoubleValues(vals);
-    return dataHolder;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinByte.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinByte.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinByte.java
deleted file mode 100644
index 80536d1..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinByte.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressNonDecimalMaxMinByte
-    implements ValueCompressonHolder.UnCompressValue<byte[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNonDecimalMaxMinByte.class.getName());
-  /**
-   * byteCompressor.
-   */
-  private static Compressor<byte[]> byteCompressor =
-      SnappyCompression.SnappyByteCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private byte[] value;
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException cloneNotSupportedException) {
-      LOGGER.error(cloneNotSupportedException,
-          cloneNotSupportedException.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressNonDecimalMaxMinByte byte1 = new UnCompressNonDecimalMaxMinByte();
-    byte1.setValue(byteCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
-    ValueCompressonHolder.UnCompressValue byte1 =
-        ValueCompressionUtil.unCompressNonDecimalMaxMin(dataType, dataType);
-    ValueCompressonHolder.unCompress(dataType, byte1, value);
-    return byte1;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return value;
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressNonDecimalMaxMinByte();
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    this.value = value;
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimalVal, Object maxValueObject) {
-    double maxValue = (double) maxValueObject;
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxValue;
-      } else {
-        vals[i] = (maxValue - value[i]) / Math.pow(10, decimalVal);
-      }
-    }
-    dataHolder.setReadableDoubleValues(vals);
-    return dataHolder;
-  }
-
-  @Override public void setValue(byte[] value) {
-    this.value = value;
-  }
-
-}

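The MaxMin variants layer a delta-from-maximum encoding on top of that scaling: getValues() receives the block maximum as an already-decoded double, a stored 0 marks the maximum itself, and any other stored value is a delta that is subtracted from the maximum and scaled down. A small sketch of that branch, again with illustrative names rather than the CarbonData API:

public final class MaxMinDecodeSketch {

  /** A stored 0 denotes the block maximum; other values are deltas from it. */
  static double[] decode(byte[] stored, double maxValue, int decimal) {
    double divisor = Math.pow(10, decimal);
    double[] vals = new double[stored.length];
    for (int i = 0; i < stored.length; i++) {
      vals[i] = (stored[i] == 0) ? maxValue : (maxValue - stored[i]) / divisor;
    }
    return vals;
  }

  public static void main(String[] args) {
    // With maxValue = 9.99 and decimal = 2: stored 0 -> 9.99, stored 5 -> (9.99 - 5) / 100
    double[] decoded = decode(new byte[] {0, 5}, 9.99, 2);
    System.out.println(decoded[0] + ", " + decoded[1]); // 9.99, ~0.0499
  }
}
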
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinDefault.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinDefault.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinDefault.java
deleted file mode 100644
index e399933..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinDefault.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressNonDecimalMaxMinDefault implements UnCompressValue<double[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNonDecimalMaxMinDefault.class.getName());
-  /**
-   * doubleCompressor.
-   */
-  private static Compressor<double[]> doubleCompressor =
-      SnappyCompression.SnappyDoubleCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private double[] value;
-
-  @Override public void setValue(double[] value) {
-    this.value = value;
-  }
-
-  @Override public UnCompressValue getNew() {
-    try {
-      return (UnCompressValue) clone();
-    } catch (CloneNotSupportedException exce) {
-      LOGGER.error(exce, exce.getMessage());
-    }
-    return null;
-  }
-
-  @Override public UnCompressValue compress() {
-    UnCompressNonDecimalMaxMinByte byte1 = new UnCompressNonDecimalMaxMinByte();
-    byte1.setValue(doubleCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToDoubleArray(buffer, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public UnCompressValue getCompressorObject() {
-    return new UnCompressNonDecimalMaxMinByte();
-  }
-
-  @Override public UnCompressValue uncompress(DataType dataType) {
-    return null;
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double maxVal = (double) maxValueObject;
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder holder = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxVal;
-      } else {
-        vals[i] = (maxVal - value[i]) / Math.pow(10, decimal);
-      }
-    }
-    holder.setReadableDoubleValues(vals);
-    return holder;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinFloat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinFloat.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinFloat.java
deleted file mode 100644
index d63d825..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinFloat.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public class UnCompressNonDecimalMaxMinFloat
-    implements ValueCompressonHolder.UnCompressValue<float[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNonDecimalMaxMinFloat.class.getName());
-  /**
-   * floatCompressor
-   */
-  private static Compressor<float[]> floatCompressor =
-      SnappyCompression.SnappyFloatCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private float[] value;
-
-  @Override public void setValue(float[] value) {
-    this.value = value;
-
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException exc1) {
-      LOGGER.error(exc1, exc1.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-
-    UnCompressNonDecimalMaxMinByte byte1 = new UnCompressNonDecimalMaxMinByte();
-    byte1.setValue(floatCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override
-  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
-    return null;
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToFloatArray(buffer, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressNonDecimalMaxMinByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double maxValue = (double) maxValueObject;
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder holder = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxValue;
-      } else {
-        vals[i] = (maxValue - value[i]) / Math.pow(10, decimal);
-      }
-    }
-    holder.setReadableDoubleValues(vals);
-    return holder;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinInt.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinInt.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinInt.java
deleted file mode 100644
index 01b4f67..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinInt.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressNonDecimalMaxMinInt implements UnCompressValue<int[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNonDecimalMaxMinInt.class.getName());
-  /**
-   * intCompressor.
-   */
-  private static Compressor<int[]> intCompressor = SnappyCompression.SnappyIntCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private int[] value;
-
-  @Override public void setValue(int[] value) {
-    this.value = value;
-
-  }
-
-  @Override public UnCompressValue getNew() {
-    try {
-      return (UnCompressValue) clone();
-    } catch (CloneNotSupportedException ex1) {
-      LOGGER.error(ex1, ex1.getMessage());
-    }
-    return null;
-  }
-
-  @Override public UnCompressValue compress() {
-
-    UnCompressNonDecimalMaxMinByte byte1 = new UnCompressNonDecimalMaxMinByte();
-    byte1.setValue(intCompressor.compress(value));
-    return byte1;
-
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToIntArray(buffer, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public UnCompressValue getCompressorObject() {
-    return new UnCompressNonDecimalMaxMinByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double maxValue = (double) maxValueObject;
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder dataHolderInfo = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxValue;
-      } else {
-        vals[i] = (maxValue - value[i]) / Math.pow(10, decimal);
-      }
-    }
-    dataHolderInfo.setReadableDoubleValues(vals);
-    return dataHolderInfo;
-  }
-
-  @Override public UnCompressValue uncompress(DataType dataType) {
-    return null;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinLong.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinLong.java
deleted file mode 100644
index 0aa7f60..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinLong.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public class UnCompressNonDecimalMaxMinLong
-    implements ValueCompressonHolder.UnCompressValue<long[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNonDecimalMaxMinLong.class.getName());
-
-  /**
-   * longCompressor.
-   */
-  private static Compressor<long[]> longCompressor =
-      SnappyCompression.SnappyLongCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private long[] value;
-
-  @Override public void setValue(long[] value) {
-    this.value = value;
-
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException exc) {
-      LOGGER.error(exc, exc.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-
-    UnCompressNonDecimalMaxMinByte uNonDecByte = new UnCompressNonDecimalMaxMinByte();
-    uNonDecByte.setValue(longCompressor.compress(value));
-    return uNonDecByte;
-  }
-
-  @Override
-  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
-    return null;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buff = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToLongArray(buff, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressNonDecimalMaxMinByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double maxValue = (double) maxValueObject;
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder carbonDataHolder = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxValue;
-      } else {
-        vals[i] = (maxValue - value[i]) / Math.pow(10, decimal);
-      }
-    }
-    carbonDataHolder.setReadableDoubleValues(vals);
-    return carbonDataHolder;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinShort.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinShort.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinShort.java
deleted file mode 100644
index 90e761a..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinShort.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public class UnCompressNonDecimalMaxMinShort
-    implements ValueCompressonHolder.UnCompressValue<short[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNonDecimalMaxMinShort.class.getName());
-  /**
-   * shortCompressor.
-   */
-  private static Compressor<short[]> shortCompressor =
-      SnappyCompression.SnappyShortCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private short[] value;
-
-  @Override public void setValue(short[] value) {
-    this.value = value;
-
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException exception5) {
-      LOGGER.error(exception5, exception5.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressNonDecimalMaxMinByte byte1 = new UnCompressNonDecimalMaxMinByte();
-    byte1.setValue(shortCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue uncompress(
-      ValueCompressionUtil.DataType dataTypeVal) {
-    return null;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToShortArray(buffer, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressNonDecimalMaxMinByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double maxValue = (double) maxValueObject;
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxValue;
-      } else {
-        vals[i] = (maxValue - value[i]) / Math.pow(10, decimal);
-      }
-    }
-    dataHolder.setReadableDoubleValues(vals);
-    return dataHolder;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalShort.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalShort.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalShort.java
deleted file mode 100644
index b25f957..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalShort.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public class UnCompressNonDecimalShort implements ValueCompressonHolder.UnCompressValue<short[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNonDecimalShort.class.getName());
-  /**
-   * shortCompressor.
-   */
-  private static Compressor<short[]> shortCompressor =
-      SnappyCompression.SnappyShortCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private short[] value;
-
-  @Override public void setValue(short[] value) {
-    this.value = value;
-
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException exception1) {
-      LOGGER.error(exception1, exception1.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressNonDecimalByte byte1 = new UnCompressNonDecimalByte();
-    byte1.setValue(shortCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override
-  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
-    return null;
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToShortArray(buffer, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressNonDecimalByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    double[] vals = new double[value.length];
-    for (int i = 0; i < vals.length; i++) {
-      vals[i] = value[i] / Math.pow(10, decimal);
-    }
-    dataHolder.setReadableDoubleValues(vals);
-    return dataHolder;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneByte.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneByte.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneByte.java
deleted file mode 100644
index 3732fbb..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneByte.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressNoneByte implements UnCompressValue<byte[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNoneByte.class.getName());
-
-  /**
-   * byteCompressor.
-   */
-  private static Compressor<byte[]> byteCompressor =
-      SnappyCompression.SnappyByteCompression.INSTANCE;
-
-  /**
-   * value.
-   */
-  private byte[] value;
-
-  @Override public UnCompressValue getNew() {
-    try {
-      return (UnCompressValue) clone();
-    } catch (CloneNotSupportedException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return null;
-  }
-
-  @Override public void setValue(byte[] value) {
-    this.value = value;
-  }
-
-  @Override public UnCompressValue uncompress(DataType dataType) {
-    UnCompressValue byte1 = ValueCompressionUtil.unCompressNone(dataType, dataType);
-    ValueCompressonHolder.unCompress(dataType, byte1, value);
-    return byte1;
-  }
-
-  @Override public UnCompressValue compress() {
-    UnCompressNoneByte byte1 = new UnCompressNoneByte();
-    byte1.setValue(byteCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return value;
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    this.value = value;
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public UnCompressValue getCompressorObject() {
-    return new UnCompressNoneByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    CarbonReadDataHolder dataHldr = new CarbonReadDataHolder();
-    double[] vals = new double[value.length];
-    for (int i = 0; i < vals.length; i++) {
-      vals[i] = value[i];
-    }
-    dataHldr.setReadableDoubleValues(vals);
-    return dataHldr;
-  }
-
-}

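Across these holders the compress/uncompress responsibilities are split in one consistent way: a typed holder's compress() hands a Snappy-compressed byte image to its byte-variant partner (the object returned by getCompressorObject()), while only the byte variants implement uncompress(), inflating back into a typed holder; the typed holders simply return null there. A reduced sketch of that handshake, with an identity codec standing in for Snappy and hypothetical holder names:

public final class HolderHandshakeSketch {

  /** Byte-side holder: the only side that knows how to uncompress. */
  static final class ByteHolder {
    final byte[] compressed;
    ByteHolder(byte[] compressed) { this.compressed = compressed; }
    IntHolder uncompress() {
      return new IntHolder(decode(compressed)); // inflate back to the typed side
    }
  }

  /** Typed holder: the only side that knows how to compress. */
  static final class IntHolder {
    final int[] values;
    IntHolder(int[] values) { this.values = values; }
    ByteHolder compress() {
      return new ByteHolder(encode(values)); // hand bytes to the byte-side partner
    }
  }

  // Identity codec standing in for SnappyCompression: 4 bytes per int
  static byte[] encode(int[] values) {
    java.nio.ByteBuffer buf = java.nio.ByteBuffer.allocate(values.length * 4);
    for (int v : values) {
      buf.putInt(v);
    }
    return buf.array();
  }

  static int[] decode(byte[] bytes) {
    java.nio.ByteBuffer buf = java.nio.ByteBuffer.wrap(bytes);
    int[] values = new int[bytes.length / 4];
    for (int i = 0; i < values.length; i++) {
      values[i] = buf.getInt();
    }
    return values;
  }

  public static void main(String[] args) {
    int[] roundTripped = new IntHolder(new int[] {7, 8, 9}).compress().uncompress().values;
    System.out.println(roundTripped[0] + " " + roundTripped[1] + " " + roundTripped[2]); // 7 8 9
  }
}
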
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneDefault.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneDefault.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneDefault.java
deleted file mode 100644
index be32f0b..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneDefault.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressNoneDefault implements UnCompressValue<double[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNoneDefault.class.getName());
-  /**
-   * doubleCompressor.
-   */
-  private static Compressor<double[]> doubleCompressor =
-      SnappyCompression.SnappyDoubleCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private double[] value;
-
-  @Override public void setValue(double[] value) {
-    this.value = value;
-
-  }
-
-  @Override public UnCompressValue getNew() {
-    try {
-      return (UnCompressValue) clone();
-    } catch (CloneNotSupportedException exception1) {
-      LOGGER.error(exception1, exception1.getMessage());
-    }
-    return null;
-  }
-
-  @Override public UnCompressValue compress() {
-    UnCompressNoneByte byte1 = new UnCompressNoneByte();
-    byte1.setValue(doubleCompressor.compress(value));
-
-    return byte1;
-  }
-
-  @Override public UnCompressValue uncompress(DataType dataType) {
-    return null;
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public UnCompressValue getCompressorObject() {
-    return new UnCompressNoneByte();
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToDoubleArray(buffer, value.length);
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    dataHolder.setReadableDoubleValues(value);
-    return dataHolder;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneFloat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneFloat.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneFloat.java
deleted file mode 100644
index a7c1a27..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneFloat.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public class UnCompressNoneFloat implements ValueCompressonHolder.UnCompressValue<float[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNoneFloat.class.getName());
-  /**
-   * floatCompressor
-   */
-  private static Compressor<float[]> floatCompressor =
-      SnappyCompression.SnappyFloatCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private float[] value;
-
-  @Override public void setValue(float[] value) {
-    this.value = value;
-
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException ex5) {
-      LOGGER.error(ex5, ex5.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressNoneByte byte1 = new UnCompressNoneByte();
-    byte1.setValue(floatCompressor.compress(value));
-
-    return byte1;
-
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToFloatArray(buffer, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressNoneByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      vals[i] = value[i];
-    }
-    dataHolder.setReadableDoubleValues(vals);
-    return dataHolder;
-  }
-
-  @Override
-  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
-    return null;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneInt.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneInt.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneInt.java
deleted file mode 100644
index 225c254..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressNoneInt.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressNoneInt implements ValueCompressonHolder.UnCompressValue<int[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressNoneInt.class.getName());
-  /**
-   * intCompressor.
-   */
-  private static Compressor<int[]> intCompressor = SnappyCompression.SnappyIntCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private int[] value;
-
-  @Override public void setValue(int[] value) {
-    this.value = value;
-
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException exc) {
-      LOGGER.error(exc, exc.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressNoneByte byte1 = new UnCompressNoneByte();
-    byte1.setValue(intCompressor.compress(value));
-
-    return byte1;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
-    return null;
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToIntArray(buffer, value.length);
-
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressNoneByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    CarbonReadDataHolder dataHolderInfoObj = new CarbonReadDataHolder();
-    double[] vals = new double[value.length];
-    for (int i = 0; i < vals.length; i++) {
-      vals[i] = value[i];
-    }
-
-    dataHolderInfoObj.setReadableDoubleValues(vals);
-    return dataHolderInfoObj;
-  }
-
-}



[25/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/result/iterator/AbstractDetailQueryResultIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/result/iterator/AbstractDetailQueryResultIterator.java b/core/src/main/java/org/apache/carbondata/scan/result/iterator/AbstractDetailQueryResultIterator.java
new file mode 100644
index 0000000..2abe39a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/result/iterator/AbstractDetailQueryResultIterator.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.result.iterator;
+
+import java.util.List;
+
+import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.datastore.DataRefNode;
+import org.apache.carbondata.core.carbon.datastore.DataRefNodeFinder;
+import org.apache.carbondata.core.carbon.datastore.impl.btree.BTreeDataRefNodeFinder;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.model.QueryModel;
+import org.apache.carbondata.scan.processor.AbstractDataBlockIterator;
+import org.apache.carbondata.scan.processor.impl.DataBlockIteratorImpl;
+
+/**
+ * In case of a detail query we cannot keep all the records in memory, so
+ * instead of materializing the full result we return an iterator over the
+ * data blocks; each call to next executes one block and returns its result.
+ */
+public abstract class AbstractDetailQueryResultIterator extends CarbonIterator {
+
+  /**
+   * LOGGER.
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(AbstractDetailQueryResultIterator.class.getName());
+
+  /**
+   * execution info of the block
+   */
+  protected List<BlockExecutionInfo> blockExecutionInfos;
+
+  /**
+   * number of records to be fetched in one batch
+   */
+  private int batchSize;
+
+  /**
+   * file reader which will be used to execute the query
+   */
+  protected FileHolder fileReader;
+
+  protected AbstractDataBlockIterator dataBlockIterator;
+
+  protected boolean nextBatch = false;
+
+  public AbstractDetailQueryResultIterator(List<BlockExecutionInfo> infos, QueryModel queryModel) {
+    String batchSizeString =
+        CarbonProperties.getInstance().getProperty(CarbonCommonConstants.DETAIL_QUERY_BATCH_SIZE);
+    if (null != batchSizeString) {
+      try {
+        batchSize = Integer.parseInt(batchSizeString);
+      } catch (NumberFormatException ne) {
+        LOGGER.error("Invalid inmemory records size. Using default value");
+        batchSize = CarbonCommonConstants.DETAIL_QUERY_BATCH_SIZE_DEFAULT;
+      }
+    } else {
+      batchSize = CarbonCommonConstants.DETAIL_QUERY_BATCH_SIZE_DEFAULT;
+    }
+
+    this.blockExecutionInfos = infos;
+    this.fileReader = FileFactory.getFileHolder(
+        FileFactory.getFileType(queryModel.getAbsoluteTableIdentifier().getStorePath()));
+    initialiseInfos();
+  }
+
+  private void initialiseInfos() {
+    for (BlockExecutionInfo blockInfo : blockExecutionInfos) {
+      DataRefNodeFinder finder = new BTreeDataRefNodeFinder(blockInfo.getEachColumnValueSize());
+      DataRefNode startDataBlock = finder
+          .findFirstDataBlock(blockInfo.getDataBlock().getDataRefNode(), blockInfo.getStartKey());
+      DataRefNode endDataBlock = finder
+          .findLastDataBlock(blockInfo.getDataBlock().getDataRefNode(), blockInfo.getEndKey());
+      long numberOfBlockToScan = endDataBlock.nodeNumber() - startDataBlock.nodeNumber() + 1;
+      blockInfo.setFirstDataBlock(startDataBlock);
+      blockInfo.setNumberOfBlockToScan(numberOfBlockToScan);
+    }
+  }
+
+  @Override public boolean hasNext() {
+    if ((dataBlockIterator != null && dataBlockIterator.hasNext()) || nextBatch) {
+      return true;
+    } else {
+      return blockExecutionInfos.size() > 0;
+    }
+  }
+
+  protected void updateDataBlockIterator() {
+    if (dataBlockIterator == null || !dataBlockIterator.hasNext()) {
+      dataBlockIterator = getDataBlockIterator();
+      while (dataBlockIterator != null && !dataBlockIterator.hasNext()) {
+        dataBlockIterator = getDataBlockIterator();
+      }
+    }
+  }
+
+  private DataBlockIteratorImpl getDataBlockIterator() {
+    if (blockExecutionInfos.size() > 0) {
+      BlockExecutionInfo executionInfo = blockExecutionInfos.get(0);
+      blockExecutionInfos.remove(executionInfo);
+      return new DataBlockIteratorImpl(executionInfo, fileReader, batchSize);
+    }
+    return null;
+  }
+
+}

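The constructor above resolves its batch size from the CarbonCommonConstants.DETAIL_QUERY_BATCH_SIZE property and falls back to the default when the value is missing or non-numeric. A self-contained sketch of that fallback, using a stand-in default value instead of the real constant:

public final class BatchSizeSketch {

  // Stand-in for CarbonCommonConstants.DETAIL_QUERY_BATCH_SIZE_DEFAULT
  static final int DEFAULT_BATCH_SIZE = 100;

  /** Parses a configured batch size, falling back on null or bad input. */
  static int resolveBatchSize(String configured) {
    if (configured == null) {
      return DEFAULT_BATCH_SIZE;
    }
    try {
      return Integer.parseInt(configured);
    } catch (NumberFormatException e) {
      // Invalid value: fall back, as the iterator constructor does
      return DEFAULT_BATCH_SIZE;
    }
  }

  public static void main(String[] args) {
    System.out.println(resolveBatchSize(null));   // 100
    System.out.println(resolveBatchSize("5000")); // 5000
    System.out.println(resolveBatchSize("abc"));  // 100
  }
}
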
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/result/iterator/ChunkRowIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/result/iterator/ChunkRowIterator.java b/core/src/main/java/org/apache/carbondata/scan/result/iterator/ChunkRowIterator.java
new file mode 100644
index 0000000..680b374
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/result/iterator/ChunkRowIterator.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.result.iterator;
+
+import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.scan.result.BatchResult;
+
+/**
+ * Iterator over row result
+ */
+public class ChunkRowIterator extends CarbonIterator<Object[]> {
+
+  /**
+   * iterator over chunk result
+   */
+  private CarbonIterator<BatchResult> iterator;
+
+  /**
+   * current chunk
+   */
+  private BatchResult currentchunk;
+
+  public ChunkRowIterator(CarbonIterator<BatchResult> iterator) {
+    this.iterator = iterator;
+    if (iterator.hasNext()) {
+      currentchunk = iterator.next();
+    }
+  }
+
+  /**
+   * Returns {@code true} if the iteration has more elements. (In other words,
+   * returns {@code true} if {@link #next} would return an element rather than
+   * throwing an exception.)
+   *
+   * @return {@code true} if the iteration has more elements
+   */
+  @Override public boolean hasNext() {
+    if (null != currentchunk) {
+      if (currentchunk.hasNext()) {
+        return true;
+      }
+      while (iterator.hasNext()) {
+        currentchunk = iterator.next();
+        if (currentchunk != null && currentchunk.hasNext()) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Returns the next element in the iteration.
+   *
+   * @return the next element in the iteration
+   */
+  @Override public Object[] next() {
+    return currentchunk.next();
+  }
+
+}
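
Assuming the surrounding query layer hands over a CarbonIterator<BatchResult> (as the classes in this patch do), consuming a detail result row by row reduces to a plain loop; a hedged usage sketch, with printAllRows as an illustrative name:

    // Drain a detail query result as single rows. The CarbonIterator<BatchResult>
    // argument is assumed to come from the query executor (not shown here).
    static void printAllRows(CarbonIterator<BatchResult> queryIterator) {
      ChunkRowIterator rows = new ChunkRowIterator(queryIterator);
      while (rows.hasNext()) {
        Object[] row = rows.next(); // row layout depends on the query model
        System.out.println(java.util.Arrays.toString(row));
      }
    }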

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/result/iterator/DetailQueryResultIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/result/iterator/DetailQueryResultIterator.java b/core/src/main/java/org/apache/carbondata/scan/result/iterator/DetailQueryResultIterator.java
new file mode 100644
index 0000000..fa804a5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/result/iterator/DetailQueryResultIterator.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.result.iterator;
+
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.model.QueryModel;
+import org.apache.carbondata.scan.result.BatchResult;
+
+/**
+ * In case of a detail query we cannot keep all the records in memory, so
+ * this iterator is returned over the blocks; on every next() call it
+ * executes the next block and returns its result.
+ */
+public class DetailQueryResultIterator extends AbstractDetailQueryResultIterator {
+
+  private ExecutorService execService = Executors.newFixedThreadPool(1);
+
+  private Future<BatchResult> future;
+
+  private final Object lock = new Object();
+
+  public DetailQueryResultIterator(List<BlockExecutionInfo> infos, QueryModel queryModel) {
+    super(infos, queryModel);
+  }
+
+  @Override public BatchResult next() {
+    BatchResult result;
+    try {
+      if (future == null) {
+        future = execute();
+      }
+      result = future.get();
+      nextBatch = false;
+      if (hasNext()) {
+        nextBatch = true;
+        future = execute();
+      } else {
+        execService.shutdown();
+        execService.awaitTermination(1, TimeUnit.HOURS);
+        fileReader.finish();
+      }
+    } catch (Exception ex) {
+      execService.shutdown();
+      fileReader.finish();
+      throw new RuntimeException(ex);
+    }
+    return result;
+  }
+
+  private Future<BatchResult> execute() {
+    return execService.submit(new Callable<BatchResult>() {
+      @Override public BatchResult call() throws QueryExecutionException {
+        BatchResult batchResult = new BatchResult();
+        synchronized (lock) {
+          updateDataBlockIterator();
+          if (dataBlockIterator != null) {
+            batchResult.setRows(dataBlockIterator.next());
+          }
+        }
+        return batchResult;
+      }
+    });
+  }
+}
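
next() above overlaps I/O with consumption: while the caller processes the batch just returned, the single-thread executor is already producing the following one, and only after the final batch is the executor shut down. A self-contained sketch of that one-batch-in-flight (double-buffering) idea, using a hypothetical Callable producer rather than the CarbonData scan:

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    // Minimal prefetching source: keeps exactly one batch "in flight".
    final class PrefetchingSource implements AutoCloseable {
      private final ExecutorService exec = Executors.newFixedThreadPool(1);
      private final Callable<int[]> producer; // stands in for the block scan
      private Future<int[]> inFlight;

      PrefetchingSource(Callable<int[]> producer) {
        this.producer = producer;
      }

      int[] nextBatch() throws Exception {
        if (inFlight == null) {
          inFlight = exec.submit(producer); // first call: start producing
        }
        int[] result = inFlight.get();      // wait for the batch in flight
        inFlight = exec.submit(producer);   // start the next batch right away
        return result;
      }

      @Override public void close() {
        exec.shutdownNow();
      }
    }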

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/result/iterator/RawResultIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/result/iterator/RawResultIterator.java b/core/src/main/java/org/apache/carbondata/scan/result/iterator/RawResultIterator.java
new file mode 100644
index 0000000..4b20627
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/result/iterator/RawResultIterator.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.result.iterator;
+
+import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+import org.apache.carbondata.scan.result.BatchResult;
+import org.apache.carbondata.scan.wrappers.ByteArrayWrapper;
+
+/**
+ * This is a wrapper iterator over the detail raw query iterator.
+ * It handles the processing of the raw rows: it iterates over the
+ * batch results and returns single rows.
+ */
+public class RawResultIterator extends CarbonIterator<Object[]> {
+
+  private final SegmentProperties sourceSegProperties;
+
+  private final SegmentProperties destinationSegProperties;
+  /**
+   * Iterator of the Batch raw result.
+   */
+  private CarbonIterator<BatchResult> detailRawQueryResultIterator;
+
+  /**
+   * Counter to maintain the row counter.
+   */
+  private int counter = 0;
+
+  private Object[] currentConvertedRawRow = null;
+
+  /**
+   * LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(RawResultIterator.class.getName());
+
+  /**
+   * batch of the result.
+   */
+  private BatchResult batch;
+
+  public RawResultIterator(CarbonIterator<BatchResult> detailRawQueryResultIterator,
+      SegmentProperties sourceSegProperties, SegmentProperties destinationSegProperties) {
+    this.detailRawQueryResultIterator = detailRawQueryResultIterator;
+    this.sourceSegProperties = sourceSegProperties;
+    this.destinationSegProperties = destinationSegProperties;
+  }
+
+  @Override public boolean hasNext() {
+    if (null == batch || checkIfBatchIsProcessedCompletely(batch)) {
+      if (detailRawQueryResultIterator.hasNext()) {
+        batch = detailRawQueryResultIterator.next();
+        counter = 0; // batch changed so reset the counter.
+      } else {
+        return false;
+      }
+    }
+    return !checkIfBatchIsProcessedCompletely(batch);
+  }
+
+  @Override public Object[] next() {
+    if (null == batch) { // for 1st time
+      batch = detailRawQueryResultIterator.next();
+    }
+    if (checkIfBatchIsProcessedCompletely(batch)) { // completed one batch.
+      batch = detailRawQueryResultIterator.next();
+      counter = 0;
+    }
+    try {
+      if (null != currentConvertedRawRow) {
+        // a row already converted by fetchConverted(); hand it out exactly once
+        counter++;
+        Object[] convertedRow = currentConvertedRawRow;
+        currentConvertedRawRow = null;
+        return convertedRow;
+      }
+      return convertRow(batch.getRawRow(counter++));
+    } catch (KeyGenException e) {
+      LOGGER.error(e.getMessage());
+      return null;
+    }
+  }
+
+  /**
+   * for fetching the row without incrementing the counter.
+   * @return the converted row, or null if no row is left
+   */
+  public Object[] fetchConverted() throws KeyGenException {
+    if (null != currentConvertedRawRow) {
+      return currentConvertedRawRow;
+    }
+    if (hasNext()) {
+      Object[] rawRow = batch.getRawRow(counter);
+      currentConvertedRawRow = convertRow(rawRow);
+      return currentConvertedRawRow;
+    } else {
+      return null;
+    }
+  }
+
+  private Object[] convertRow(Object[] rawRow) throws KeyGenException {
+    byte[] dims = ((ByteArrayWrapper) rawRow[0]).getDictionaryKey();
+    long[] keyArray = sourceSegProperties.getDimensionKeyGenerator().getKeyArray(dims);
+    byte[] convertedBytes =
+        destinationSegProperties.getDimensionKeyGenerator().generateKey(keyArray);
+    ((ByteArrayWrapper) rawRow[0]).setDictionaryKey(convertedBytes);
+    return rawRow;
+  }
+
+  /**
+   * To check if the batch is processed completely
+   * @param batch the batch to check
+   * @return true if all rows of the batch have been consumed
+   */
+  private boolean checkIfBatchIsProcessedCompletely(BatchResult batch) {
+    return counter >= batch.getSize();
+  }
+}
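
convertRow() above decodes the packed dictionary key with the source segment's key generator into a long[] of surrogate values and re-packs it with the destination segment's generator, so the same logical row can be read under a different bit layout. A hedged sketch of that decode/re-encode round trip, with a hypothetical KeyCodec interface standing in for KeyGenerator#getKeyArray/#generateKey:

    // Hypothetical codec mirroring the two KeyGenerator calls used above.
    interface KeyCodec {
      long[] decode(byte[] packed);  // packed mdkey -> surrogate values
      byte[] encode(long[] values);  // surrogate values -> packed mdkey
    }

    final class KeyRecoder {
      // Re-encode a packed key from the source layout to the destination layout.
      static byte[] recode(byte[] sourceKey, KeyCodec source, KeyCodec destination) {
        long[] surrogates = source.decode(sourceKey); // same surrogate values...
        return destination.encode(surrogates);        // ...packed with new widths
      }
    }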

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/scanner/AbstractBlockletScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/scanner/AbstractBlockletScanner.java b/core/src/main/java/org/apache/carbondata/scan/scanner/AbstractBlockletScanner.java
new file mode 100644
index 0000000..caee061
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/scanner/AbstractBlockletScanner.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.scanner;
+
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+import org.apache.carbondata.scan.result.AbstractScannedResult;
+
+/**
+ * Blocklet scanner class to process the block
+ */
+public abstract class AbstractBlockletScanner implements BlockletScanner {
+
+  /**
+   * scanner result
+   */
+  protected AbstractScannedResult scannedResult;
+
+  /**
+   * block execution info
+   */
+  protected BlockExecutionInfo blockExecutionInfo;
+
+  public AbstractBlockletScanner(BlockExecutionInfo tableBlockExecutionInfos) {
+    this.blockExecutionInfo = tableBlockExecutionInfos;
+  }
+
+  @Override public AbstractScannedResult scanBlocklet(BlocksChunkHolder blocksChunkHolder)
+      throws QueryExecutionException {
+    fillKeyValue(blocksChunkHolder);
+    return scannedResult;
+  }
+
+  protected void fillKeyValue(BlocksChunkHolder blocksChunkHolder) {
+    scannedResult.reset();
+    scannedResult.setMeasureChunks(blocksChunkHolder.getDataBlock()
+        .getMeasureChunks(blocksChunkHolder.getFileReader(),
+            blockExecutionInfo.getAllSelectedMeasureBlocksIndexes()));
+    scannedResult.setNumberOfRows(blocksChunkHolder.getDataBlock().nodeSize());
+
+    scannedResult.setDimensionChunks(blocksChunkHolder.getDataBlock()
+        .getDimensionChunks(blocksChunkHolder.getFileReader(),
+            blockExecutionInfo.getAllSelectedDimensionBlocksIndexes()));
+  }
+}
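
This base class is a small template method: scanBlocklet() fixes the skeleton (fill chunks, return the shared scannedResult), while FilterScanner below overrides the scan step and NonFilterScanner only swaps in a different AbstractScannedResult. A toy illustration of that shape (hypothetical names, not the CarbonData types):

    // Template method: the base class fixes the scan skeleton; subclasses
    // pick the result object and may refine individual steps.
    abstract class TemplateScanner {
      protected StringBuilder result; // stands in for AbstractScannedResult

      String scan(String blocklet) {
        fill(blocklet);               // shared step, as in fillKeyValue()
        return result.toString();
      }

      protected void fill(String blocklet) {
        result.setLength(0);          // as in scannedResult.reset()
        result.append(blocklet);
      }
    }

    final class PlainScanner extends TemplateScanner {
      PlainScanner() { result = new StringBuilder(); } // non-filter analogue
    }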

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/scanner/BlockletScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/scanner/BlockletScanner.java b/core/src/main/java/org/apache/carbondata/scan/scanner/BlockletScanner.java
new file mode 100644
index 0000000..0337197
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/scanner/BlockletScanner.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.scanner;
+
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+import org.apache.carbondata.scan.result.AbstractScannedResult;
+
+/**
+ * Interface for processing the block
+ * Processing can be filter based processing or non filter based processing
+ */
+public interface BlockletScanner {
+
+  /**
+   * Below method will be used to process the block data and get the scanned result
+   *
+   * @param blocksChunkHolder block chunk which holds the block data
+   * @return scannerResult
+   * result after processing
+   * @throws QueryExecutionException
+   */
+  AbstractScannedResult scanBlocklet(BlocksChunkHolder blocksChunkHolder)
+      throws QueryExecutionException;
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/scanner/impl/FilterScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/scanner/impl/FilterScanner.java b/core/src/main/java/org/apache/carbondata/scan/scanner/impl/FilterScanner.java
new file mode 100644
index 0000000..1f63a88
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/scanner/impl/FilterScanner.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.scanner.impl;
+
+import java.util.BitSet;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+import org.apache.carbondata.scan.result.AbstractScannedResult;
+import org.apache.carbondata.scan.result.impl.FilterQueryScannedResult;
+import org.apache.carbondata.scan.scanner.AbstractBlockletScanner;
+
+/**
+ * Below class will be used for filter query processing;
+ * it will first apply the filter, then read the blocks if
+ * required, and return the scanned result
+ */
+public class FilterScanner extends AbstractBlockletScanner {
+
+  /**
+   * filter tree
+   */
+  private FilterExecuter filterExecuter;
+
+  /**
+   * this flag decides whether to apply the min/max pruning check.
+   * It is useful for a dimension column on the right side, as the node finder
+   * always gives tentative blocks: if the column data is stored individually
+   * in sorted order, we can check whether the filter value is within the
+   * min/max range and apply the filter on the complete data only if it is.
+   * This is very useful for sparse data where rows are repeating.
+   */
+  private boolean isMinMaxEnabled;
+
+  public FilterScanner(BlockExecutionInfo blockExecutionInfo) {
+    super(blockExecutionInfo);
+    scannedResult = new FilterQueryScannedResult(blockExecutionInfo);
+    // to check whether min max is enabled or not
+    String minMaxEnableValue = CarbonProperties.getInstance()
+        .getProperty(CarbonCommonConstants.CARBON_QUERY_MIN_MAX_ENABLED,
+            CarbonCommonConstants.MIN_MAX_DEFAULT_VALUE);
+    if (null != minMaxEnableValue) {
+      isMinMaxEnabled = Boolean.parseBoolean(minMaxEnableValue);
+    }
+    // get the filter tree
+    this.filterExecuter = blockExecutionInfo.getFilterExecuterTree();
+  }
+
+  /**
+   * Below method will be used to process the block
+   *
+   * @param blocksChunkHolder block chunk holder which holds the data
+   * @throws QueryExecutionException
+   * @throws FilterUnsupportedException
+   */
+  @Override public AbstractScannedResult scanBlocklet(BlocksChunkHolder blocksChunkHolder)
+      throws QueryExecutionException {
+    try {
+      fillScannedResult(blocksChunkHolder);
+    } catch (FilterUnsupportedException e) {
+      throw new QueryExecutionException(e.getMessage());
+    }
+    return scannedResult;
+  }
+
+  /**
+   * This method will process the data in the below order:
+   * 1. first apply min/max on the filter tree and check whether any filter
+   * value falls within the min/max range; if not, return an empty result
+   * 2. if the filter falls within the min/max range, apply the filter on the
+   * actual data and get the filtered row indexes
+   * 3. if the row indexes are empty, return an empty result
+   * 4. if the row indexes are not empty, read only those blocks (measure or
+   * dimension) which are present in the query but not in the filter; while
+   * applying the filter some of the blocks were already read and kept in the
+   * chunk holder, so there is no need to read those blocks again
+   * 5. set the blocks and filter indexes on the result
+   *
+   * @param blocksChunkHolder
+   * @throws FilterUnsupportedException
+   */
+  private void fillScannedResult(BlocksChunkHolder blocksChunkHolder)
+      throws FilterUnsupportedException {
+
+    scannedResult.reset();
+    // apply min max
+    if (isMinMaxEnabled) {
+      BitSet bitSet = this.filterExecuter
+          .isScanRequired(blocksChunkHolder.getDataBlock().getColumnsMaxValue(),
+              blocksChunkHolder.getDataBlock().getColumnsMinValue());
+      if (bitSet.isEmpty()) {
+        scannedResult.setNumberOfRows(0);
+        scannedResult.setIndexes(new int[0]);
+        return;
+      }
+    }
+    // apply filter on actual data
+    BitSet bitSet = this.filterExecuter.applyFilter(blocksChunkHolder);
+    // if indexes is empty then return with empty result
+    if (bitSet.isEmpty()) {
+      scannedResult.setNumberOfRows(0);
+      scannedResult.setIndexes(new int[0]);
+      return;
+    }
+    // get the row indexes from the bit set
+    int[] indexes = new int[bitSet.cardinality()];
+    int index = 0;
+    for (int i = bitSet.nextSetBit(0); i >= 0; i = bitSet.nextSetBit(i + 1)) {
+      indexes[index++] = i;
+    }
+
+    FileHolder fileReader = blocksChunkHolder.getFileReader();
+    int[] allSelectedDimensionBlocksIndexes =
+        blockExecutionInfo.getAllSelectedDimensionBlocksIndexes();
+    DimensionColumnDataChunk[] dimensionColumnDataChunk =
+        new DimensionColumnDataChunk[blockExecutionInfo.getTotalNumberDimensionBlock()];
+    // read dimension chunk blocks from file which is not present
+    for (int i = 0; i < allSelectedDimensionBlocksIndexes.length; i++) {
+      if (null == blocksChunkHolder.getDimensionDataChunk()[allSelectedDimensionBlocksIndexes[i]]) {
+        dimensionColumnDataChunk[allSelectedDimensionBlocksIndexes[i]] =
+            blocksChunkHolder.getDataBlock()
+                .getDimensionChunk(fileReader, allSelectedDimensionBlocksIndexes[i]);
+      } else {
+        dimensionColumnDataChunk[allSelectedDimensionBlocksIndexes[i]] =
+            blocksChunkHolder.getDimensionDataChunk()[allSelectedDimensionBlocksIndexes[i]];
+      }
+    }
+    MeasureColumnDataChunk[] measureColumnDataChunk =
+        new MeasureColumnDataChunk[blockExecutionInfo.getTotalNumberOfMeasureBlock()];
+    int[] allSelectedMeasureBlocksIndexes = blockExecutionInfo.getAllSelectedMeasureBlocksIndexes();
+
+    // read the measure chunk blocks which is not present
+    for (int i = 0; i < allSelectedMeasureBlocksIndexes.length; i++) {
+
+      if (null == blocksChunkHolder.getMeasureDataChunk()[allSelectedMeasureBlocksIndexes[i]]) {
+        measureColumnDataChunk[allSelectedMeasureBlocksIndexes[i]] =
+            blocksChunkHolder.getDataBlock()
+                .getMeasureChunk(fileReader, allSelectedMeasureBlocksIndexes[i]);
+      } else {
+        measureColumnDataChunk[allSelectedMeasureBlocksIndexes[i]] =
+            blocksChunkHolder.getMeasureDataChunk()[allSelectedMeasureBlocksIndexes[i]];
+      }
+    }
+    scannedResult.setDimensionChunks(dimensionColumnDataChunk);
+    scannedResult.setIndexes(indexes);
+    scannedResult.setMeasureChunks(measureColumnDataChunk);
+    scannedResult.setNumberOfRows(indexes.length);
+  }
+}
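
The index extraction in fillScannedResult() above, turning the BitSet of surviving rows into an int[] of row indexes, is the standard nextSetBit walk; a self-contained, runnable version:

    import java.util.Arrays;
    import java.util.BitSet;

    public final class BitSetIndexes {
      // Collect the positions of all set bits, as in fillScannedResult().
      static int[] setBitIndexes(BitSet bitSet) {
        int[] indexes = new int[bitSet.cardinality()];
        int index = 0;
        for (int i = bitSet.nextSetBit(0); i >= 0; i = bitSet.nextSetBit(i + 1)) {
          indexes[index++] = i;
        }
        return indexes;
      }

      public static void main(String[] args) {
        BitSet filtered = new BitSet();
        filtered.set(2);
        filtered.set(5);
        filtered.set(9);
        System.out.println(Arrays.toString(setBitIndexes(filtered))); // [2, 5, 9]
      }
    }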

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/scanner/impl/NonFilterScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/scanner/impl/NonFilterScanner.java b/core/src/main/java/org/apache/carbondata/scan/scanner/impl/NonFilterScanner.java
new file mode 100644
index 0000000..4d8a8e0
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/scanner/impl/NonFilterScanner.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.scanner.impl;
+
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.result.impl.NonFilterQueryScannedResult;
+import org.apache.carbondata.scan.scanner.AbstractBlockletScanner;
+
+/**
+ * Non-filter processor which will be used for a non-filter query.
+ * In case of a non-filter query we just need to read all the blocks requested in the
+ * query and pass them to the scanned result
+ */
+public class NonFilterScanner extends AbstractBlockletScanner {
+
+  public NonFilterScanner(BlockExecutionInfo blockExecutionInfo) {
+    super(blockExecutionInfo);
+    // as it is a non-filter query, create a non-filter query scanned result object
+    scannedResult = new NonFilterQueryScannedResult(blockExecutionInfo);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/wrappers/ByteArrayWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/wrappers/ByteArrayWrapper.java b/core/src/main/java/org/apache/carbondata/scan/wrappers/ByteArrayWrapper.java
new file mode 100644
index 0000000..1b3a4c6
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/wrappers/ByteArrayWrapper.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.wrappers;
+
+import org.apache.carbondata.core.util.ByteUtil.UnsafeComparer;
+
+/**
+ * This class will store the dimension column data when query is executed
+ * This can be used as a key for aggregation
+ */
+public class ByteArrayWrapper implements Comparable<ByteArrayWrapper> {
+
+  /**
+   * to store key which is generated using
+   * key generator
+   */
+  protected byte[] dictionaryKey;
+
+  /**
+   * to store complex type column data
+   */
+  protected byte[][] complexTypesKeys;
+
+  /**
+   * to store no dictionary column data
+   */
+  protected byte[][] noDictionaryKeys;
+
+  public ByteArrayWrapper() {
+  }
+
+  /**
+   * @return the dictionaryKey
+   */
+  public byte[] getDictionaryKey() {
+    return dictionaryKey;
+  }
+
+  /**
+   * @param dictionaryKey the dictionaryKey to set
+   */
+  public void setDictionaryKey(byte[] dictionaryKey) {
+    this.dictionaryKey = dictionaryKey;
+  }
+
+  /**
+   * @param noDictionaryKeys the noDictionaryKeys to set
+   */
+  public void setNoDictionaryKeys(byte[][] noDictionaryKeys) {
+    this.noDictionaryKeys = noDictionaryKeys;
+  }
+
+  /**
+   * to get the no dictionary column data
+   *
+   * @param index of the no dictionary key
+   * @return no dictionary key for the index
+   */
+  public byte[] getNoDictionaryKeyByIndex(int index) {
+    return this.noDictionaryKeys[index];
+  }
+
+  /**
+   * to get the complex type column data
+   *
+   * @param index of the complex type key
+   * @return complex type key for the index
+   */
+  public byte[] getComplexTypeByIndex(int index) {
+    return this.complexTypesKeys[index];
+  }
+
+  /**
+   * to generate the hash code
+   */
+  @Override public int hashCode() {
+    // first generate the hash code of the dictionary column
+    int len = dictionaryKey.length;
+    int result = 1;
+    for (int j = 0; j < len; j++) {
+      result = 31 * result + dictionaryKey[j];
+    }
+    // then no dictionary column
+    for (byte[] directSurrogateValue : noDictionaryKeys) {
+      for (int i = 0; i < directSurrogateValue.length; i++) {
+        result = 31 * result + directSurrogateValue[i];
+      }
+    }
+    // then for complex type
+    for (byte[] complexTypeKey : complexTypesKeys) {
+      for (int i = 0; i < complexTypeKey.length; i++) {
+        result = 31 * result + complexTypeKey[i];
+      }
+    }
+    return result;
+  }
+
+  /**
+   * to check whether two objects are equal
+   *
+   * @param other object
+   */
+  @Override public boolean equals(Object other) {
+    if (!(other instanceof ByteArrayWrapper)) {
+      return false;
+    }
+    boolean result = false;
+    // Comparison will be as follows
+    // first compare the no dictionary column
+    // if it is not equal then return false
+    // if it is equal then compare the complex column
+    // if it is also equal then compare dictionary column
+    byte[][] noDictionaryKeysOther = ((ByteArrayWrapper) other).noDictionaryKeys;
+    if (noDictionaryKeysOther.length != noDictionaryKeys.length) {
+      return false;
+    } else {
+      for (int i = 0; i < noDictionaryKeys.length; i++) {
+        result = UnsafeComparer.INSTANCE.equals(noDictionaryKeys[i], noDictionaryKeysOther[i]);
+        if (!result) {
+          return false;
+        }
+      }
+    }
+
+    byte[][] complexTypesKeysOther = ((ByteArrayWrapper) other).complexTypesKeys;
+    if (complexTypesKeysOther.length != complexTypesKeys.length) {
+      return false;
+    } else {
+      for (int i = 0; i < complexTypesKeys.length; i++) {
+        result = UnsafeComparer.INSTANCE.equals(complexTypesKeys[i], complexTypesKeysOther[i]);
+        if (!result) {
+          return false;
+        }
+      }
+    }
+
+    return UnsafeComparer.INSTANCE.equals(dictionaryKey, ((ByteArrayWrapper) other).dictionaryKey);
+  }
+
+  /**
+   * Compare method for the ByteArrayWrapper class; this will be used to compare
+   * two ByteArrayWrapper objects, basically by comparing their byte arrays
+   *
+   * @param other ByteArrayWrapper object
+   */
+  @Override public int compareTo(ByteArrayWrapper other) {
+    // comparison will be as follows:
+    // compare the dictionary column first,
+    // then the no dictionary columns,
+    // then the complex type column data
+    int compareTo = UnsafeComparer.INSTANCE.compareTo(dictionaryKey, other.dictionaryKey);
+    if (compareTo == 0) {
+      for (int i = 0; i < noDictionaryKeys.length; i++) {
+        compareTo =
+            UnsafeComparer.INSTANCE.compareTo(noDictionaryKeys[i], other.noDictionaryKeys[i]);
+        if (compareTo != 0) {
+          return compareTo;
+        }
+      }
+    }
+    if (compareTo == 0) {
+      for (int i = 0; i < complexTypesKeys.length; i++) {
+        compareTo =
+            UnsafeComparer.INSTANCE.compareTo(complexTypesKeys[i], other.complexTypesKeys[i]);
+        if (compareTo != 0) {
+          return compareTo;
+        }
+      }
+    }
+    return compareTo;
+  }
+
+  /**
+   * @return the complexTypesKeys
+   */
+  public byte[][] getComplexTypesKeys() {
+    return complexTypesKeys;
+  }
+
+  /**
+   * @param complexTypesKeys the complexTypesKeys to set
+   */
+  public void setComplexTypesKeys(byte[][] complexTypesKeys) {
+    this.complexTypesKeys = complexTypesKeys;
+  }
+}
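
The hashCode/equals/compareTo trio above is what lets the wrapper act as an aggregation key: a raw byte[] cannot, since Java arrays inherit identity-based hashCode and equals. A self-contained illustration of that pitfall (plain Java, independent of this class):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    public final class ArrayKeyDemo {
      // Minimal value-semantics wrapper, the same idea as ByteArrayWrapper.
      static final class Key {
        final byte[] bytes;
        Key(byte[] bytes) { this.bytes = bytes; }
        @Override public int hashCode() { return Arrays.hashCode(bytes); }
        @Override public boolean equals(Object o) {
          return o instanceof Key && Arrays.equals(bytes, ((Key) o).bytes);
        }
      }

      public static void main(String[] args) {
        Map<byte[], Integer> raw = new HashMap<>();
        raw.put(new byte[] {1, 2}, 1);
        System.out.println(raw.get(new byte[] {1, 2}));              // null: identity hash

        Map<Key, Integer> wrapped = new HashMap<>();
        wrapped.put(new Key(new byte[] {1, 2}), 1);
        System.out.println(wrapped.get(new Key(new byte[] {1, 2}))); // 1
      }
    }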

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/common/ext/ColumnUniqueIdGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/common/ext/ColumnUniqueIdGenerator.java b/core/src/main/java/org/carbondata/common/ext/ColumnUniqueIdGenerator.java
deleted file mode 100644
index bc96302..0000000
--- a/core/src/main/java/org/carbondata/common/ext/ColumnUniqueIdGenerator.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.common.ext;
-
-import java.util.UUID;
-
-import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
-import org.carbondata.core.service.ColumnUniqueIdService;
-
-/**
- * It returns unique id given column
- */
-public class ColumnUniqueIdGenerator implements ColumnUniqueIdService {
-
-  private static ColumnUniqueIdService columnUniqueIdService = new ColumnUniqueIdGenerator();
-
-  @Override public String generateUniqueId(String databaseName, ColumnSchema columnSchema) {
-    return UUID.randomUUID().toString();
-  }
-
-  public static ColumnUniqueIdService getInstance() {
-    return columnUniqueIdService;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/common/ext/DictionaryFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/common/ext/DictionaryFactory.java b/core/src/main/java/org/carbondata/common/ext/DictionaryFactory.java
deleted file mode 100644
index 944f772..0000000
--- a/core/src/main/java/org/carbondata/common/ext/DictionaryFactory.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.common.ext;
-
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.reader.CarbonDictionaryMetadataReader;
-import org.carbondata.core.reader.CarbonDictionaryMetadataReaderImpl;
-import org.carbondata.core.reader.CarbonDictionaryReader;
-import org.carbondata.core.reader.CarbonDictionaryReaderImpl;
-import org.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReader;
-import org.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReaderImpl;
-import org.carbondata.core.service.DictionaryService;
-import org.carbondata.core.writer.CarbonDictionaryWriter;
-import org.carbondata.core.writer.CarbonDictionaryWriterImpl;
-import org.carbondata.core.writer.sortindex.CarbonDictionarySortIndexWriter;
-import org.carbondata.core.writer.sortindex.CarbonDictionarySortIndexWriterImpl;
-
-/**
- * service to get dictionary reader and writer
- */
-public class DictionaryFactory implements DictionaryService {
-
-  private static DictionaryService dictService = new DictionaryFactory();
-
-  /**
-   * get dictionary writer
-   *
-   * @param carbonTableIdentifier
-   * @param columnIdentifier
-   * @param carbonStorePath
-   * @return
-   */
-  @Override public CarbonDictionaryWriter getDictionaryWriter(
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
-      String carbonStorePath) {
-    return new CarbonDictionaryWriterImpl(carbonStorePath, carbonTableIdentifier, columnIdentifier);
-  }
-
-  /**
-   * get dictionary sort index writer
-   *
-   * @param carbonTableIdentifier
-   * @param columnIdentifier
-   * @param carbonStorePath
-   * @return
-   */
-  @Override public CarbonDictionarySortIndexWriter getDictionarySortIndexWriter(
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
-      String carbonStorePath) {
-    return new CarbonDictionarySortIndexWriterImpl(carbonTableIdentifier, columnIdentifier,
-        carbonStorePath);
-  }
-
-  /**
-   * get dictionary metadata reader
-   *
-   * @param carbonTableIdentifier
-   * @param columnIdentifier
-   * @param carbonStorePath
-   * @return
-   */
-  @Override public CarbonDictionaryMetadataReader getDictionaryMetadataReader(
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
-      String carbonStorePath) {
-    return new CarbonDictionaryMetadataReaderImpl(carbonStorePath, carbonTableIdentifier,
-        columnIdentifier);
-  }
-
-  /**
-   * get dictionary reader
-   *
-   * @param carbonTableIdentifier
-   * @param columnIdentifier
-   * @param carbonStorePath
-   * @return
-   */
-  @Override public CarbonDictionaryReader getDictionaryReader(
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
-      String carbonStorePath) {
-    return new CarbonDictionaryReaderImpl(carbonStorePath, carbonTableIdentifier, columnIdentifier);
-  }
-
-  /**
-   * get dictionary sort index reader
-   *
-   * @param carbonTableIdentifier
-   * @param columnIdentifier
-   * @param carbonStorePath
-   * @return
-   */
-  @Override public CarbonDictionarySortIndexReader getDictionarySortIndexReader(
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
-      String carbonStorePath) {
-    return new CarbonDictionarySortIndexReaderImpl(carbonTableIdentifier, columnIdentifier,
-        carbonStorePath);
-  }
-
-  public static DictionaryService getInstance() {
-    return dictService;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/common/ext/PathFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/common/ext/PathFactory.java b/core/src/main/java/org/carbondata/common/ext/PathFactory.java
deleted file mode 100644
index 8b64aec..0000000
--- a/core/src/main/java/org/carbondata/common/ext/PathFactory.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.common.ext;
-
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.carbon.path.CarbonStorePath;
-import org.carbondata.core.carbon.path.CarbonTablePath;
-import org.carbondata.core.service.PathService;
-
-/**
- * Create helper to get path details
- */
-public class PathFactory implements PathService {
-
-  private static PathService pathService = new PathFactory();
-
-  /**
-   * @param columnIdentifier
-   * @param storeLocation
-   * @param tableIdentifier
-   * @return store path related to tables
-   */
-  @Override public CarbonTablePath getCarbonTablePath(ColumnIdentifier columnIdentifier,
-      String storeLocation, CarbonTableIdentifier tableIdentifier) {
-    return CarbonStorePath.getCarbonTablePath(storeLocation, tableIdentifier);
-  }
-
-  public static PathService getInstance() {
-    return pathService;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/common/factory/CarbonCommonFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/common/factory/CarbonCommonFactory.java b/core/src/main/java/org/carbondata/common/factory/CarbonCommonFactory.java
deleted file mode 100644
index dfa14f9..0000000
--- a/core/src/main/java/org/carbondata/common/factory/CarbonCommonFactory.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.common.factory;
-
-import org.carbondata.common.ext.ColumnUniqueIdGenerator;
-import org.carbondata.common.ext.DictionaryFactory;
-import org.carbondata.common.ext.PathFactory;
-import org.carbondata.core.service.ColumnUniqueIdService;
-import org.carbondata.core.service.DictionaryService;
-import org.carbondata.core.service.PathService;
-
-/**
- * Interface to get services
- */
-public class CarbonCommonFactory {
-
-  /**
-   * @return dictionary service
-   */
-  public static DictionaryService getDictionaryService() {
-    return DictionaryFactory.getInstance();
-  }
-
-  /**
-   * @return path service
-   */
-  public static PathService getPathService() {
-    return PathFactory.getInstance();
-  }
-
-  /**
-   * @return unique id generator
-   */
-  public static ColumnUniqueIdService getColumnUniqueIdGenerator() {
-    return ColumnUniqueIdGenerator.getInstance();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/Cache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/Cache.java b/core/src/main/java/org/carbondata/core/cache/Cache.java
deleted file mode 100644
index d2985bd..0000000
--- a/core/src/main/java/org/carbondata/core/cache/Cache.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache;
-
-import java.util.List;
-
-import org.carbondata.core.util.CarbonUtilException;
-
-/**
- * A semi-persistent mapping from keys to values. Cache entries are manually added using
- * #get(Key), #getAll(List<Keys>) , and are stored in the cache until
- * either evicted or manually invalidated.
- * Implementations of this interface are expected to be thread-safe, and can be safely accessed
- * by multiple concurrent threads.
- */
-public interface Cache<K, V> {
-
-  /**
-   * This method will get the value for the given key. If value does not exist
-   * for the given key, it will check and load the value.
-   *
-   * @param key
-   * @return
-   * @throws CarbonUtilException in case memory is not sufficient to load data into memory
-   */
-  V get(K key) throws CarbonUtilException;
-
-  /**
-   * This method will return a list of values for the given list of keys.
-   * For each key, this method will check and load the data if required.
-   *
-   * @param keys
-   * @return
-   * @throws CarbonUtilException in case memory is not sufficient to load data into memory
-   */
-  List<V> getAll(List<K> keys) throws CarbonUtilException;
-
-  /**
-   * This method will return the value for the given key. It will not check and load
-   * the data for the given key
-   *
-   * @param key
-   * @return
-   */
-  V getIfPresent(K key);
-
-  /**
-   * This method will remove the cache for a given key
-   *
-   * @param key
-   */
-  void invalidate(K key);
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/CacheProvider.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/CacheProvider.java b/core/src/main/java/org/carbondata/core/cache/CacheProvider.java
deleted file mode 100644
index ad9857a..0000000
--- a/core/src/main/java/org/carbondata/core/cache/CacheProvider.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.carbondata.core.cache.dictionary.Dictionary;
-import org.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
-import org.carbondata.core.cache.dictionary.ForwardDictionaryCache;
-import org.carbondata.core.cache.dictionary.ReverseDictionaryCache;
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-/**
- * Cache provider class which will create a cache based on given type
- */
-public class CacheProvider {
-
-  /**
-   * cache provider instance
-   */
-  private static CacheProvider cacheProvider = new CacheProvider();
-
-  /**
-   * a map that will hold the entry for cache type to cache object mapping
-   */
-  private Map<CacheType, Cache> cacheTypeToCacheMap =
-      new HashMap<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-
-  /**
-   * a map that will hold the mapping of cache type to LRU cache instance
-   */
-  private Map<CacheType, CarbonLRUCache> cacheTypeToLRUCacheMap =
-      new HashMap<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-
-  /**
-   * object lock instance to be used in synchronization block
-   */
-  private final Object lock = new Object();
-
-  /**
-   * private constructor to follow singleton design pattern for this class
-   */
-  private CacheProvider() {
-
-  }
-
-  /**
-   * @return cache provider instance
-   */
-  public static CacheProvider getInstance() {
-    return cacheProvider;
-  }
-
-  /**
-   * This method will check if a cache already exists for given cache type and create in case
-   * it is not present in the map
-   *
-   * @param cacheType       type of cache
-   * @param carbonStorePath store path
-   * @param <K>
-   * @param <V>
-   * @return
-   */
-  public <K, V> Cache<K, V> createCache(CacheType cacheType, String carbonStorePath) {
-    //check if lru cache is null, if null create one
-    //check if cache is null for given cache type, if null create one
-    if (!dictionaryCacheAlreadyExists(cacheType)) {
-      synchronized (lock) {
-        if (!dictionaryCacheAlreadyExists(cacheType)) {
-          if (null == cacheTypeToLRUCacheMap.get(cacheType)) {
-            createLRULevelCacheInstance(cacheType);
-          }
-          createDictionaryCacheForGivenType(cacheType, carbonStorePath);
-        }
-      }
-    }
-    return cacheTypeToCacheMap.get(cacheType);
-  }
-
-  /**
-   * This method will create the cache for given cache type
-   *
-   * @param cacheType       type of cache
-   * @param carbonStorePath store path
-   */
-  private void createDictionaryCacheForGivenType(CacheType cacheType, String carbonStorePath) {
-    Cache cacheObject = null;
-    if (cacheType.equals(CacheType.REVERSE_DICTIONARY)) {
-      cacheObject =
-          new ReverseDictionaryCache<DictionaryColumnUniqueIdentifier, Dictionary>(carbonStorePath,
-              cacheTypeToLRUCacheMap.get(cacheType));
-    } else if (cacheType.equals(CacheType.FORWARD_DICTIONARY)) {
-      cacheObject =
-          new ForwardDictionaryCache<DictionaryColumnUniqueIdentifier, Dictionary>(carbonStorePath,
-              cacheTypeToLRUCacheMap.get(cacheType));
-    }
-    cacheTypeToCacheMap.put(cacheType, cacheObject);
-  }
-
-  /**
-   * This method will create the lru cache instance based on the given type
-   *
-   * @param cacheType
-   */
-  private void createLRULevelCacheInstance(CacheType cacheType) {
-    CarbonLRUCache carbonLRUCache = null;
-    // if cache type is dictionary cache, then same lru cache instance has to be shared
-    // between forward and reverse cache
-    if (cacheType.equals(CacheType.REVERSE_DICTIONARY) || cacheType
-        .equals(CacheType.FORWARD_DICTIONARY)) {
-      carbonLRUCache = new CarbonLRUCache(CarbonCommonConstants.CARBON_MAX_LEVEL_CACHE_SIZE,
-          CarbonCommonConstants.CARBON_MAX_LEVEL_CACHE_SIZE_DEFAULT);
-      cacheTypeToLRUCacheMap.put(CacheType.REVERSE_DICTIONARY, carbonLRUCache);
-      cacheTypeToLRUCacheMap.put(CacheType.FORWARD_DICTIONARY, carbonLRUCache);
-    }
-  }
-
-  /**
-   * This method will check whether the map already has an entry for
-   * given cache type
-   *
-   * @param cacheType
-   * @return
-   */
-  private boolean dictionaryCacheAlreadyExists(CacheType cacheType) {
-    return null != cacheTypeToCacheMap.get(cacheType);
-  }
-
-  /**
-   * Below method will be used to clear the cache
-   */
-  public void dropAllCache() {
-    cacheTypeToLRUCacheMap.clear();
-    cacheTypeToCacheMap.clear();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/CacheType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/CacheType.java b/core/src/main/java/org/carbondata/core/cache/CacheType.java
deleted file mode 100644
index d07daf8..0000000
--- a/core/src/main/java/org/carbondata/core/cache/CacheType.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache;
-
-import org.carbondata.core.cache.dictionary.Dictionary;
-import org.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
-
-/**
- * class which defines different cache types. cache type can be dictionary cache for
- * forward (surrogate key to byte array mapping) and reverse (byte array to
- * surrogate mapping) dictionary or a B-tree cache
- */
-public class CacheType<K, V> {
-
-  /**
-   * Forward dictionary cache which maintains surrogate key to byte array mapping
-   */
-  public static final CacheType<DictionaryColumnUniqueIdentifier, Dictionary> FORWARD_DICTIONARY =
-      new CacheType("forward_dictionary");
-
-  /**
-   * Reverse dictionary cache which maintains byte array to surrogate key mapping
-   */
-  public static final CacheType<DictionaryColumnUniqueIdentifier, Dictionary> REVERSE_DICTIONARY =
-      new CacheType("reverse_dictionary");
-
-  /**
-   * cacheName which is unique name for a cache
-   */
-  private String cacheName;
-
-  /**
-   * @param cacheName
-   */
-  private CacheType(String cacheName) {
-    this.cacheName = cacheName;
-  }
-
-  /**
-   * @return cache unique name
-   */
-  public String getCacheName() {
-    return cacheName;
-  }
-}

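CacheType above follows the typesafe-enum pattern: a private constructor plus public static final constants, so the set of cache types is closed and instances can be compared by reference. A standalone sketch of the same pattern (hypothetical class, not part of CarbonData):

// Sketch of the typesafe-enum pattern used by CacheType: the private
// constructor guarantees no instances exist beyond the declared constants.
public final class CacheKindSketch {
  public static final CacheKindSketch FORWARD = new CacheKindSketch("forward_dictionary");
  public static final CacheKindSketch REVERSE = new CacheKindSketch("reverse_dictionary");

  private final String cacheName;

  private CacheKindSketch(String cacheName) {
    this.cacheName = cacheName;
  }

  public String getCacheName() {
    return cacheName;
  }

  public static void main(String[] args) {
    CacheKindSketch kind = FORWARD;
    // reference equality is safe because the constants are the only instances
    System.out.println(kind == FORWARD);      // true
    System.out.println(kind.getCacheName());  // forward_dictionary
  }
}
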
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/Cacheable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/Cacheable.java b/core/src/main/java/org/carbondata/core/cache/Cacheable.java
deleted file mode 100644
index e0cc390..0000000
--- a/core/src/main/java/org/carbondata/core/cache/Cacheable.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache;
-
-/**
- * interface which declares methods which will decide whether to keep
- * cacheable objects in memory
- */
-public interface Cacheable {
-
-  /**
-   * This method will return the timestamp of the file, based on which
-   * the decision will be taken whether to read that file or not
-   *
-   * @return
-   */
-  long getFileTimeStamp();
-
-  /**
-   * This method will return the access count for a column based on which decision will be taken
-   * whether to keep the object in memory
-   *
-   * @return
-   */
-  int getAccessCount();
-
-  /**
-   * This method will return the memory size of a column
-   *
-   * @return
-   */
-  long getMemorySize();
-}

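A minimal sketch of how a Cacheable implementation typically behaves (hypothetical class; the pin/unpin idea matches the access-count contract, under which an entry with a positive access count must not be evicted):

import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical Cacheable-style object: evictable only while its access count
// is zero; callers pin it during use and release it when done.
public class PinnableEntry {
  private final AtomicInteger accessCount = new AtomicInteger();
  private volatile long fileTimeStamp;
  private volatile long memorySize;

  public long getFileTimeStamp() { return fileTimeStamp; }
  public int getAccessCount() { return accessCount.get(); }
  public long getMemorySize() { return memorySize; }

  public void acquire() { accessCount.incrementAndGet(); }   // pin before use
  public void release() { accessCount.decrementAndGet(); }   // unpin after use
}
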
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/CarbonLRUCache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/CarbonLRUCache.java b/core/src/main/java/org/carbondata/core/cache/CarbonLRUCache.java
deleted file mode 100644
index 72ee209..0000000
--- a/core/src/main/java/org/carbondata/core/cache/CarbonLRUCache.java
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache;
-
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.util.CarbonProperties;
-
-/**
- * class which manages the lru cache
- */
-public final class CarbonLRUCache {
-  /**
-   * constant for converting MB into bytes
-   */
-  private static final int BYTE_CONVERSION_CONSTANT = 1024 * 1024;
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(CarbonLRUCache.class.getName());
-  /**
-   * Map that will contain key as table unique name and value as cache Holder
-   * object
-   */
-  private Map<String, Cacheable> lruCacheMap;
-  /**
-   * lruCacheSize
-   */
-  private long lruCacheMemorySize;
-  /**
-   * total size of the cache
-   */
-  private long currentSize;
-
-  /**
-   * @param propertyName        property name to take the size configured
-   * @param defaultPropertyName default property in case size is not configured
-   */
-  public CarbonLRUCache(String propertyName, String defaultPropertyName) {
-    try {
-      lruCacheMemorySize = Integer
-          .parseInt(CarbonProperties.getInstance().getProperty(propertyName, defaultPropertyName));
-    } catch (NumberFormatException e) {
-      lruCacheMemorySize = Integer.parseInt(defaultPropertyName);
-    }
-    initCache();
-    if (lruCacheMemorySize > 0) {
-      LOGGER.info("Configured level cahce size is " + lruCacheMemorySize + " MB");
-      // convert in bytes
-      lruCacheMemorySize = lruCacheMemorySize * BYTE_CONVERSION_CONSTANT;
-    } else {
-      LOGGER.info("Column cache size not configured. Therefore default behavior will be "
-              + "considered and no LRU based eviction of columns will be done");
-    }
-  }
-
-  /**
-   * initialize lru cache
-   */
-  private void initCache() {
-    lruCacheMap =
-        new LinkedHashMap<String, Cacheable>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE, 1.0f,
-            true);
-  }
-
-  /**
-   * This method will give the list of all the keys that can be deleted from
-   * the level LRU cache
-   */
-  private List<String> getKeysToBeRemoved(long size) {
-    List<String> toBeDeletedKeys =
-        new ArrayList<String>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    long removedSize = 0;
-    for (Entry<String, Cacheable> entry : lruCacheMap.entrySet()) {
-      String key = entry.getKey();
-      Cacheable cacheInfo = entry.getValue();
-      long memorySize = cacheInfo.getMemorySize();
-      if (canBeRemoved(cacheInfo)) {
-        removedSize = removedSize + memorySize;
-        toBeDeletedKeys.add(key);
-        // check if, after removing just the current file's size, the
-        // required size added to the current size fits within the
-        // cache limit
-        if (lruCacheMemorySize >= (currentSize - memorySize + size)) {
-          toBeDeletedKeys.clear();
-          toBeDeletedKeys.add(key);
-          removedSize = memorySize;
-          break;
-        }
-        // check if, after removing the accumulated removed size, the
-        // required size added to the current size fits within the
-        // cache limit
-        else if (lruCacheMemorySize >= (currentSize - removedSize + size)) {
-          break;
-        }
-      }
-    }
-    // this case will come when iteration is complete over the keys but
-    // still size is not sufficient for level file to be loaded, then we
-    // will not delete any of the keys
-    if ((currentSize - removedSize + size) > lruCacheMemorySize) {
-      toBeDeletedKeys.clear();
-    }
-    return toBeDeletedKeys;
-  }
-
-  /**
-   * @param cacheInfo
-   * @return
-   */
-  private boolean canBeRemoved(Cacheable cacheInfo) {
-    if (cacheInfo.getAccessCount() > 0) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * @param key
-   */
-  public void remove(String key) {
-    synchronized (lruCacheMap) {
-      removeKey(key);
-    }
-  }
-
-  /**
-   * This method will remove the key from lru cache
-   *
-   * @param key
-   */
-  private void removeKey(String key) {
-    Cacheable cacheable = lruCacheMap.get(key);
-    if (null != cacheable) {
-      currentSize = currentSize - cacheable.getMemorySize();
-    }
-    lruCacheMap.remove(key);
-    LOGGER.info("Removed level entry from InMemory level lru cache :: " + key);
-  }
-
-  /**
-   * This method will check if the required size is available in the memory and then add
-   * the given cacheable object to the lru cache
-   *
-   * @param columnIdentifier
-   * @param cacheInfo
-   */
-  public boolean put(String columnIdentifier, Cacheable cacheInfo, long requiredSize) {
-    boolean columnKeyAddedSuccessfully = false;
-    if (freeMemorySizeForAddingCache(requiredSize)) {
-      synchronized (lruCacheMap) {
-        currentSize = currentSize + requiredSize;
-        if (null == lruCacheMap.get(columnIdentifier)) {
-          lruCacheMap.put(columnIdentifier, cacheInfo);
-        }
-        columnKeyAddedSuccessfully = true;
-      }
-      LOGGER.debug("Added level entry to InMemory level lru cache :: " + columnIdentifier);
-    } else {
-      LOGGER.error("Size not available. Column cannot be added to level lru cache :: "
-          + columnIdentifier + " .Required Size = " + requiredSize + " Size available "
-          + (lruCacheMemorySize - currentSize));
-    }
-    return columnKeyAddedSuccessfully;
-  }
-
-  /**
-   * This method will check whether a required column can be loaded into memory or not.
-   * If required, this method will call for eviction of existing data from memory
-   *
-   * @param requiredSize
-   * @return
-   */
-  private boolean freeMemorySizeForAddingCache(long requiredSize) {
-    boolean memoryAvailable = false;
-    if (lruCacheMemorySize > 0) {
-      if (isSizeAvailableToLoadColumnDictionary(requiredSize)) {
-        memoryAvailable = true;
-      } else {
-        synchronized (lruCacheMap) {
-          // get the keys that can be removed from memory
-          List<String> keysToBeRemoved = getKeysToBeRemoved(requiredSize);
-          for (String cacheKey : keysToBeRemoved) {
-            removeKey(cacheKey);
-          }
-          // after removing the keys check again if required size is available
-          if (isSizeAvailableToLoadColumnDictionary(requiredSize)) {
-            memoryAvailable = true;
-          }
-        }
-      }
-    } else {
-      memoryAvailable = true;
-    }
-    return memoryAvailable;
-  }
-
-  /**
-   * This method will check if size is available to load dictionary into memory
-   *
-   * @param requiredSize
-   * @return
-   */
-  private boolean isSizeAvailableToLoadColumnDictionary(long requiredSize) {
-    return lruCacheMemorySize >= (currentSize + requiredSize);
-  }
-
-  /**
-   * @param key
-   * @return
-   */
-  public Cacheable get(String key) {
-    synchronized (lruCacheMap) {
-      return lruCacheMap.get(key);
-    }
-  }
-
-  /**
-   * This method will empty the level cache
-   */
-  public void clear() {
-    synchronized (lruCacheMap) {
-      lruCacheMap.clear();
-    }
-  }
-}

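The eviction arithmetic in getKeysToBeRemoved can be checked in isolation: keys with zero access count are removed until currentSize - removedSize + requiredSize fits within the budget. A simplified self-contained sketch (it omits the single-key shortcut and the pinned-entry check; all numbers are made up):

// Sketch of the size accounting behind getKeysToBeRemoved.
public class EvictionSketch {
  public static void main(String[] args) {
    long budget = 100;        // lruCacheMemorySize after MB-to-bytes conversion
    long currentSize = 90;    // bytes already cached
    long requiredSize = 30;   // bytes the new entry needs
    long[] evictableSizes = {20, 15};   // sizes of entries with access count 0
    long removed = 0;
    int evicted = 0;
    for (long size : evictableSizes) {
      removed += size;
      evicted++;
      if (budget >= currentSize - removed + requiredSize) {
        break;                // enough space freed for the new entry
      }
    }
    // 90 - 20 + 30 = 100 <= 100, so a single eviction is sufficient
    System.out.println("entries evicted: " + evicted + ", bytes freed: " + removed);
  }
}
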
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java b/core/src/main/java/org/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java
deleted file mode 100644
index 5e20603..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-import java.nio.charset.Charset;
-import java.util.List;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-/**
- * class that implements cacheable interface and methods specific to column dictionary
- */
-public abstract class AbstractColumnDictionaryInfo implements DictionaryInfo {
-
-  /**
-   * list that will hold all the dictionary chunks for one column
-   */
-  protected List<List<byte[]>> dictionaryChunks = new CopyOnWriteArrayList<>();
-
-  /**
-   * minimum value of surrogate key, dictionary value key will start from count 1
-   */
-  protected static final int MINIMUM_SURROGATE_KEY = 1;
-
-  /**
-   * atomic integer to maintain the access count for a column access
-   */
-  protected AtomicInteger accessCount = new AtomicInteger();
-
-  /**
-   * file timestamp
-   */
-  protected long fileTimeStamp;
-
-  /**
-   * offset till where file is read
-   */
-  protected long offsetTillFileIsRead;
-
-  /**
-   * length of dictionary metadata file
-   */
-  private long dictionaryMetaFileLength;
-
-  /**
-   * This method will return the timestamp of the file, based on which
-   * the decision will be taken whether to read that file or not
-   *
-   * @return
-   */
-  @Override public long getFileTimeStamp() {
-    return fileTimeStamp;
-  }
-
-  /**
-   * This method will return the access count for a column based on which decision will be taken
-   * whether to keep the object in memory
-   *
-   * @return
-   */
-  @Override public int getAccessCount() {
-    return accessCount.get();
-  }
-
-  /**
-   * This method will return the memory size of a column
-   *
-   * @return
-   */
-  @Override public long getMemorySize() {
-    return offsetTillFileIsRead;
-  }
-
-  /**
-   * This method will increment the access count for a column by 1
-   * whenever a column is getting used in query or incremental data load
-   */
-  @Override public void incrementAccessCount() {
-    accessCount.incrementAndGet();
-  }
-
-  /**
-   * This method will decrement the access count for a column by 1
-   * whenever a column usage is complete
-   */
-  private void decrementAccessCount() {
-    if (accessCount.get() > 0) {
-      accessCount.decrementAndGet();
-    }
-  }
-
-  /**
-   * This method will update the end offset of the file every time a file is read
-   *
-   * @param offsetTillFileIsRead
-   */
-  @Override public void setOffsetTillFileIsRead(long offsetTillFileIsRead) {
-    this.offsetTillFileIsRead = offsetTillFileIsRead;
-  }
-
-  /**
-   * This method will update the timestamp of a file if a file is modified
-   * like in case of incremental load
-   *
-   * @param fileTimeStamp
-   */
-  @Override public void setFileTimeStamp(long fileTimeStamp) {
-    this.fileTimeStamp = fileTimeStamp;
-  }
-
-  /**
-   * This method returns the list of dictionary chunks of a column.
-   * Application scenario:
-   * preparing the column sort info while writing the sort index file.
-   *
-   * @return
-   */
-  @Override public DictionaryChunksWrapper getDictionaryChunks() {
-    DictionaryChunksWrapper chunksWrapper = new DictionaryChunksWrapper(dictionaryChunks);
-    return chunksWrapper;
-  }
-
-  /**
-   * This method will release the objects and set default value for primitive types
-   */
-  @Override public void clear() {
-    decrementAccessCount();
-  }
-
-  /**
-   * This method will find and return the sort index for a given dictionary id.
-   * Applicable scenarios:
-   * 1. Used in case of order by queries when data sorting is required
-   *
-   * @param surrogateKey a unique ID for a dictionary value
-   * @return if found returns key else 0
-   */
-  @Override public int getSortedIndex(int surrogateKey) {
-    return 0;
-  }
-
-  /**
-   * dictionary metadata file length which will be set whenever we reload dictionary
-   * data from disk
-   *
-   * @param dictionaryMetaFileLength length of dictionary metadata file
-   */
-  @Override public void setDictionaryMetaFileLength(long dictionaryMetaFileLength) {
-    this.dictionaryMetaFileLength = dictionaryMetaFileLength;
-  }
-
-  /**
-   * Dictionary meta file length which will be read to check whether the length of the
-   * dictionary meta file has been modified
-   *
-   * @return
-   */
-  @Override public long getDictionaryMetaFileLength() {
-    return dictionaryMetaFileLength;
-  }
-
-  /**
-   * This method will find and return the dictionary value from sorted index.
-   * Applicable scenarios:
-   * 1. Query final result preparation in case of order by queries:
-   * while converting the final result, which will
-   * be surrogate keys, back to the original dictionary values, this method will be used
-   *
-   * @param sortedIndex sort index of dictionary value
-   * @return value if found else null
-   */
-  @Override public String getDictionaryValueFromSortedIndex(int sortedIndex) {
-    return null;
-  }
-
-  /**
-   * This method will set the sort order index of a dictionary column.
-   * Sort order index is the index of dictionary values after they are sorted.
-   *
-   * @param sortOrderIndex
-   */
-  @Override public void setSortOrderIndex(List<Integer> sortOrderIndex) {
-  }
-
-  /**
-   * This method will set the sort reverse index of a dictionary column.
-   * Sort reverse index is the index of dictionary values before they are sorted.
-   *
-   * @param sortReverseOrderIndex
-   */
-  @Override public void setSortReverseOrderIndex(List<Integer> sortReverseOrderIndex) {
-  }
-
-  /**
-   * This method will find and return the dictionary value for a given surrogate key.
-   * Applicable scenarios:
-   * 1. Query final result preparation: while converting the final result, which will
-   * be surrogate keys, back to the original dictionary values, this method will be used
-   *
-   * @param surrogateKey a unique ID for a dictionary value
-   * @return value if found else null
-   */
-  @Override public String getDictionaryValueForKey(int surrogateKey) {
-    String dictionaryValue = null;
-    if (surrogateKey < MINIMUM_SURROGATE_KEY) {
-      return dictionaryValue;
-    }
-    byte[] dictionaryValueInBytes = getDictionaryBytesFromSurrogate(surrogateKey);
-    if (null != dictionaryValueInBytes) {
-      dictionaryValue = new String(dictionaryValueInBytes,
-          Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
-    }
-    return dictionaryValue;
-  }
-
-  /**
-   * This method will find and return the dictionary value as byte array for a
-   * given surrogate key
-   *
-   * @param surrogateKey
-   * @return
-   */
-  protected byte[] getDictionaryBytesFromSurrogate(int surrogateKey) {
-    byte[] dictionaryValueInBytes = null;
-    int totalSizeOfDictionaryChunksTraversed = 0;
-    for (List<byte[]> oneDictionaryChunk : dictionaryChunks) {
-      totalSizeOfDictionaryChunksTraversed =
-          totalSizeOfDictionaryChunksTraversed + oneDictionaryChunk.size();
-      // skip dictionary chunks until the number of entries traversed
-      // reaches the surrogate key
-      if (totalSizeOfDictionaryChunksTraversed < surrogateKey) {
-        continue;
-      }
-      // lets say surrogateKey = 26, total size traversed is 28, dictionary chunk size = 12
-      // then surrogate position in dictionary chunk list is = 26 - (28-12) - 1 = 9
-      // -1 because list index starts from 0
-      int surrogatePositionInDictionaryChunk =
-          surrogateKey - (totalSizeOfDictionaryChunksTraversed - oneDictionaryChunk.size()) - 1;
-      dictionaryValueInBytes = oneDictionaryChunk.get(surrogatePositionInDictionaryChunk);
-      break;
-    }
-    return dictionaryValueInBytes;
-  }
-
-  /**
-   * This method will find and return the surrogate key for a given dictionary value
-   * Applicable scenario:
-   * 1. Incremental data load: dictionary will not be generated for existing values. For
-   * that, values have to be looked up in the existing dictionary cache.
-   * 2. Filter scenarios where from value surrogate key has to be found.
-   *
-   * @param value dictionary value
-   * @return if found returns key else 0
-   */
-  @Override public int getSurrogateKey(String value) {
-    byte[] keyData = value.getBytes(Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
-    return getSurrogateKey(keyData);
-  }
-}
-

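The chunk arithmetic in getDictionaryBytesFromSurrogate is easy to verify standalone; the sketch below rebuilds it with plain lists (hypothetical values, uniform chunk size 12 in the spirit of the inline comment's worked example):

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

// Standalone version of the surrogate-key-to-chunk-position lookup:
// walk chunks until the running entry count reaches the key, then index
// into that chunk (list positions start at 0, surrogate keys at 1).
public class ChunkLookupSketch {
  static byte[] lookup(List<List<byte[]>> chunks, int surrogateKey) {
    int traversed = 0;
    for (List<byte[]> chunk : chunks) {
      traversed += chunk.size();
      if (traversed < surrogateKey) {
        continue;                           // key lies in a later chunk
      }
      int pos = surrogateKey - (traversed - chunk.size()) - 1;
      return chunk.get(pos);
    }
    return null;                            // key beyond all chunks
  }

  public static void main(String[] args) {
    List<List<byte[]>> chunks = new ArrayList<List<byte[]>>();
    for (int c = 0; c < 3; c++) {           // keys 1..36 in chunks of 12
      List<byte[]> chunk = new ArrayList<byte[]>();
      for (int i = 0; i < 12; i++) {
        chunk.add(("v" + (c * 12 + i + 1)).getBytes(StandardCharsets.UTF_8));
      }
      chunks.add(chunk);
    }
    // key 26 falls in the third chunk (keys 25..36) at list position 1
    System.out.println(new String(lookup(chunks, 26), StandardCharsets.UTF_8)); // v26
  }
}
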

[47/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/BlockIndexStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/BlockIndexStore.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/BlockIndexStore.java
new file mode 100644
index 0000000..be48ce5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/BlockIndexStore.java
@@ -0,0 +1,309 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.AbstractIndex;
+import org.apache.carbondata.core.carbon.datastore.block.BlockIndex;
+import org.apache.carbondata.core.carbon.datastore.block.TableBlockInfo;
+import org.apache.carbondata.core.carbon.datastore.exception.IndexBuilderException;
+import org.apache.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.CarbonUtilException;
+
+/**
+ * Singleton class to handle loading, unloading, clearing and storing of the
+ * table blocks
+ */
+public class BlockIndexStore {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(BlockIndexStore.class.getName());
+  /**
+   * singleton instance
+   */
+  private static final BlockIndexStore CARBONTABLEBLOCKSINSTANCE = new BlockIndexStore();
+
+  /**
+   * map to hold the table and its list of blocks
+   */
+  private Map<AbsoluteTableIdentifier, Map<TableBlockInfo, AbstractIndex>> tableBlocksMap;
+
+  /**
+   * map of block info to lock object. While loading the btree this map is filled,
+   * and the entry is removed after the tree for that particular block info is loaded.
+   * This is useful while loading trees concurrently: only a block level lock is
+   * applied, so another block can be loaded concurrently
+   */
+  private Map<TableBlockInfo, Object> blockInfoLock;
+
+  /**
+   * map of table to its lock object. This is useful in the concurrent
+   * query scenario: when more than one query comes for the same table,
+   * it ensures that only one query will be able to load the blocks
+   */
+  private Map<AbsoluteTableIdentifier, Object> tableLockMap;
+
+  private BlockIndexStore() {
+    tableBlocksMap =
+        new ConcurrentHashMap<AbsoluteTableIdentifier, Map<TableBlockInfo, AbstractIndex>>(
+            CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    tableLockMap = new ConcurrentHashMap<AbsoluteTableIdentifier, Object>(
+        CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    blockInfoLock = new ConcurrentHashMap<TableBlockInfo, Object>();
+  }
+
+  /**
+   * Return the instance of this class
+   *
+   * @return singleton instance
+   */
+  public static BlockIndexStore getInstance() {
+    return CARBONTABLEBLOCKSINSTANCE;
+  }
+
+  /**
+   * Below method will be used to load the blocks which are not yet loaded and to
+   * get the loaded blocks. If all the blocks passed are already loaded, then
+   * nothing will be loaded; else the missing blocks will be loaded.
+   *
+   * @param tableBlocksInfos        list of blocks to be loaded
+   * @param absoluteTableIdentifier absolute Table Identifier to identify the table
+   * @throws IndexBuilderException
+   */
+  public List<AbstractIndex> loadAndGetBlocks(List<TableBlockInfo> tableBlocksInfos,
+      AbsoluteTableIdentifier absoluteTableIdentifier) throws IndexBuilderException {
+    AbstractIndex[] loadedBlock = new AbstractIndex[tableBlocksInfos.size()];
+    addTableLockObject(absoluteTableIdentifier);
+    // sort the block info
+    // so block will be loaded in sorted order this will be required for
+    // query execution
+    Collections.sort(tableBlocksInfos);
+    // get the instance
+    Object lockObject = tableLockMap.get(absoluteTableIdentifier);
+    Map<TableBlockInfo, AbstractIndex> tableBlockMapTemp = null;
+    int numberOfCores = 1;
+    try {
+      numberOfCores = Integer.parseInt(CarbonProperties.getInstance()
+          .getProperty(CarbonCommonConstants.NUM_CORES,
+              CarbonCommonConstants.NUM_CORES_DEFAULT_VAL));
+    } catch (NumberFormatException e) {
+      numberOfCores = Integer.parseInt(CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
+    }
+    ExecutorService executor = Executors.newFixedThreadPool(numberOfCores);
+    // Acquire the lock to ensure only one query is loading the table blocks
+    // if same block is assigned to both the queries
+    synchronized (lockObject) {
+      tableBlockMapTemp = tableBlocksMap.get(absoluteTableIdentifier);
+      // if it is loading for first time
+      if (null == tableBlockMapTemp) {
+        tableBlockMapTemp = new ConcurrentHashMap<TableBlockInfo, AbstractIndex>();
+        tableBlocksMap.put(absoluteTableIdentifier, tableBlockMapTemp);
+      }
+    }
+    AbstractIndex tableBlock = null;
+    List<Future<AbstractIndex>> blocksList = new ArrayList<Future<AbstractIndex>>();
+    int counter = -1;
+    for (TableBlockInfo blockInfo : tableBlocksInfos) {
+      counter++;
+      // if table block is already loaded then do not load
+      // that block
+      tableBlock = tableBlockMapTemp.get(blockInfo);
+      // if block is not loaded
+      if (null == tableBlock) {
+        // check any lock object is present in
+        // block info lock map
+        Object blockInfoLockObject = blockInfoLock.get(blockInfo);
+        // if lock object is not present then acquire
+        // the lock on the block info lock map and add a lock object in the map for
+        // the particular block info; a double checking mechanism is used so that
+        // in case of concurrent queries for the same block info only one lock
+        // object will be added
+        if (null == blockInfoLockObject) {
+          synchronized (blockInfoLock) {
+            // again checking the block info lock, to check whether lock object is present
+            // or not if now also not present then add a lock object
+            blockInfoLockObject = blockInfoLock.get(blockInfo);
+            if (null == blockInfoLockObject) {
+              blockInfoLockObject = new Object();
+              blockInfoLock.put(blockInfo, blockInfoLockObject);
+            }
+          }
+        }
+        //acquire the lock for particular block info
+        synchronized (blockInfoLockObject) {
+          // check again whether the block is present or not, to avoid the
+          // same block being loaded
+          // more than once in case of concurrent query
+          tableBlock = tableBlockMapTemp.get(blockInfo);
+          // if still block is not present then load the block
+          if (null == tableBlock) {
+            blocksList.add(executor.submit(new BlockLoaderThread(blockInfo, tableBlockMapTemp)));
+          }
+        }
+      } else {
+        // if the block is already loaded then directly set the block at the particular
+        // position, so blocks will be present in sorted order
+        loadedBlock[counter] = tableBlock;
+      }
+    }
+    // shutdown the executor gracefully and wait until all the tasks are finished
+    executor.shutdown();
+    try {
+      executor.awaitTermination(1, TimeUnit.HOURS);
+    } catch (InterruptedException e) {
+      throw new IndexBuilderException(e);
+    }
+    // fill the block which were not loaded before to loaded blocks array
+    fillLoadedBlocks(loadedBlock, blocksList);
+    return Arrays.asList(loadedBlock);
+  }
+
+  /**
+   * Below method will be used to fill the loaded blocks to the array
+   * which will be used for query execution
+   *
+   * @param loadedBlockArray array of blocks which will be filled
+   * @param blocksList       blocks loaded in thread
+   * @throws IndexBuilderException in case of any failure
+   */
+  private void fillLoadedBlocks(AbstractIndex[] loadedBlockArray,
+      List<Future<AbstractIndex>> blocksList) throws IndexBuilderException {
+    int blockCounter = 0;
+    for (int i = 0; i < loadedBlockArray.length; i++) {
+      if (null == loadedBlockArray[i]) {
+        try {
+          loadedBlockArray[i] = blocksList.get(blockCounter++).get();
+        } catch (InterruptedException | ExecutionException e) {
+          throw new IndexBuilderException(e);
+        }
+      }
+
+    }
+  }
+
+  private AbstractIndex loadBlock(Map<TableBlockInfo, AbstractIndex> tableBlockMapTemp,
+      TableBlockInfo blockInfo) throws CarbonUtilException {
+    AbstractIndex tableBlock;
+    DataFileFooter footer;
+    // getting the data file meta data of the block
+    footer = CarbonUtil.readMetadatFile(blockInfo.getFilePath(), blockInfo.getBlockOffset(),
+        blockInfo.getBlockLength());
+    tableBlock = new BlockIndex();
+    footer.setTableBlockInfo(blockInfo);
+    // building the block
+    tableBlock.buildIndex(Arrays.asList(footer));
+    tableBlockMapTemp.put(blockInfo, tableBlock);
+    // finally remove the lock object from block info lock as once block is loaded
+    // it will not come inside this if condition
+    blockInfoLock.remove(blockInfo);
+    return tableBlock;
+  }
+
+  /**
+   * Method to add table level lock if lock is not present for the table
+   *
+   * @param absoluteTableIdentifier
+   */
+  private synchronized void addTableLockObject(AbsoluteTableIdentifier absoluteTableIdentifier) {
+    // add the instance to lock map if it is not present
+    if (null == tableLockMap.get(absoluteTableIdentifier)) {
+      tableLockMap.put(absoluteTableIdentifier, new Object());
+    }
+  }
+
+  /**
+   * This will be used to remove particular blocks, useful in case of
+   * deletion of some of the blocks due to retention or maybe some other
+   * scenario
+   *
+   * @param removeTableBlocksInfos  blocks to be removed
+   * @param absoluteTableIdentifier absolute table identifier
+   */
+  public void removeTableBlocks(List<TableBlockInfo> removeTableBlocksInfos,
+      AbsoluteTableIdentifier absoluteTableIdentifier) {
+    // get the lock object; if the lock object is not present then it is not
+    // loaded at all,
+    // so we can return from here
+    Object lockObject = tableLockMap.get(absoluteTableIdentifier);
+    if (null == lockObject) {
+      return;
+    }
+    Map<TableBlockInfo, AbstractIndex> map = tableBlocksMap.get(absoluteTableIdentifier);
+    // if there is no loaded blocks then return
+    if (null == map) {
+      return;
+    }
+    for (TableBlockInfo blockInfos : removeTableBlocksInfos) {
+      map.remove(blockInfos);
+    }
+  }
+
+  /**
+   * remove all the details of a table; this will be used in case of drop table
+   *
+   * @param absoluteTableIdentifier absolute table identifier to find the table
+   */
+  public void clear(AbsoluteTableIdentifier absoluteTableIdentifier) {
+    // removing all the details of table
+    tableLockMap.remove(absoluteTableIdentifier);
+    tableBlocksMap.remove(absoluteTableIdentifier);
+  }
+
+  /**
+   * Thread class which will be used to load the blocks
+   */
+  private class BlockLoaderThread implements Callable<AbstractIndex> {
+    /**
+     * table block info to block index map
+     */
+    private Map<TableBlockInfo, AbstractIndex> tableBlockMap;
+
+    // block info
+    private TableBlockInfo blockInfo;
+
+    private BlockLoaderThread(TableBlockInfo blockInfo,
+        Map<TableBlockInfo, AbstractIndex> tableBlockMap) {
+      this.tableBlockMap = tableBlockMap;
+      this.blockInfo = blockInfo;
+    }
+
+    @Override public AbstractIndex call() throws Exception {
+      // load and return the loaded blocks
+      return loadBlock(tableBlockMap, blockInfo);
+    }
+  }
+}

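The per-block synchronization in loadAndGetBlocks is a double-checked, per-key lock: a shared map hands out one lock object per block, so distinct blocks load in parallel while duplicate loads of the same block serialize. A self-contained sketch of the idea (names hypothetical; computeIfAbsent is a compact stand-in for the explicit double check in the code above):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of per-key double-checked locking, as used for blockInfoLock.
public class PerKeyLockSketch {
  private final Map<String, Object> locks = new ConcurrentHashMap<String, Object>();
  private final Map<String, Object> loaded = new ConcurrentHashMap<String, Object>();

  Object load(String key) {
    Object value = loaded.get(key);
    if (value != null) {
      return value;                         // fast path: already loaded
    }
    Object lock = locks.computeIfAbsent(key, k -> new Object());
    synchronized (lock) {
      value = loaded.get(key);              // re-check under the lock
      if (value == null) {
        value = "block:" + key;             // stand-in for reading a file footer
        loaded.put(key, value);
        locks.remove(key);                  // once loaded, the lock is obsolete
      }
    }
    return value;
  }
}
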
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/BtreeBuilder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/BtreeBuilder.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/BtreeBuilder.java
new file mode 100644
index 0000000..fb59607
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/BtreeBuilder.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore;
+
+/**
+ * Below interface will be used to build the index
+ * in some data structure
+ */
+public interface BtreeBuilder {
+
+  /**
+   * Below method will be used to store the leaf collection in some data structure
+   */
+  void build(BTreeBuilderInfo blocksBuilderInfos);
+
+  /**
+   * below method to get the first data block
+   *
+   * @return data block
+   */
+  DataRefNode get();
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/DataRefNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/DataRefNode.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/DataRefNode.java
new file mode 100644
index 0000000..e81a9a6
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/DataRefNode.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+/**
+ * Interface data block reference
+ */
+public interface DataRefNode {
+
+  /**
+   * Method to get the next block. This can be used while scanning, when an
+   * iterator over this class is used to iterate over blocks
+   *
+   * @return next block
+   */
+  DataRefNode getNextDataRefNode();
+
+  /**
+   * to get the number of key tuples present in the block
+   *
+   * @return number of keys in the block
+   */
+  int nodeSize();
+
+  /**
+   * Method can be used to get the block index. This can be used when multiple
+   * threads are used to scan a group of blocks: some of the blocks can be
+   * assigned to one thread and some to another
+   *
+   * @return block number
+   */
+  long nodeNumber();
+
+  /**
+   * This method will be used to get the max value of all the columns; this can
+   * be used in case of filter query
+   *
+   * @return max value of all the columns
+   */
+  byte[][] getColumnsMaxValue();
+
+  /**
+   * This method will be used to get the min value of all the columns; this can
+   * be used in case of filter query
+   *
+   * @return min value of all the columns
+   */
+  byte[][] getColumnsMinValue();
+
+  /**
+   * Below method will be used to get the dimension chunks
+   *
+   * @param fileReader   file reader to read the chunks from file
+   * @param blockIndexes indexes of the blocks need to be read
+   * @return dimension data chunks
+   */
+  DimensionColumnDataChunk[] getDimensionChunks(FileHolder fileReader, int[] blockIndexes);
+
+  /**
+   * Below method will be used to get the dimension chunk
+   *
+   * @param fileReader file reader to read the chunk from file
+   * @param blockIndex block index to be read
+   * @return dimension data chunk
+   */
+  DimensionColumnDataChunk getDimensionChunk(FileHolder fileReader, int blockIndex);
+
+  /**
+   * Below method will be used to get the measure chunk
+   *
+   * @param fileReader   file reader to read the chunk from file
+   * @param blockIndexes block indexes to be read from file
+   * @return measure column data chunk
+   */
+  MeasureColumnDataChunk[] getMeasureChunks(FileHolder fileReader, int[] blockIndexes);
+
+  /**
+   * Below method will be used to read the measure chunk
+   *
+   * @param fileReader file read to read the file chunk
+   * @param blockIndex block index to be read from file
+   * @return measure data chunk
+   */
+  MeasureColumnDataChunk getMeasureChunk(FileHolder fileReader, int blockIndex);
+}

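Given getNextDataRefNode, a leaf-level scan is just a linked-list walk. A sketch with a stand-in node type (constructing real DataRefNodes requires file footers, so this only illustrates the traversal shape):

// Traversal shape supported by DataRefNode: follow the next pointer
// from the first matching block until the chain ends.
public class NodeScanSketch {
  interface Node {            // stand-in for DataRefNode
    Node getNext();           // stand-in for getNextDataRefNode()
    int nodeSize();           // number of key tuples in the block
  }

  static long countRows(Node first) {
    long rows = 0;
    for (Node n = first; n != null; n = n.getNext()) {
      rows += n.nodeSize();
    }
    return rows;
  }
}
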
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/DataRefNodeFinder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/DataRefNodeFinder.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/DataRefNodeFinder.java
new file mode 100644
index 0000000..78592f7
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/DataRefNodeFinder.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore;
+
+/**
+ * Below Interface is to search a block
+ */
+public interface DataRefNodeFinder {
+
+  /**
+   * Below method will be used to get the first tentative block which matches with
+   * the search key
+   *
+   * @param dataBlocks complete data blocks present
+   * @param searchKey  key to be searched
+   * @return data block
+   */
+  DataRefNode findFirstDataBlock(DataRefNode dataBlocks, IndexKey searchKey);
+
+  /**
+   * Below method will be used to get the last tentative block which matches with
+   * the search key
+   *
+   * @param dataBlocks complete data blocks present
+   * @param searchKey  key to be searched
+   * @return data block
+   */
+  DataRefNode findLastDataBlock(DataRefNode dataBlocks, IndexKey searchKey);
+}

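A finder is typically used to bound a range scan: locate the first and last candidate blocks for a key range, then walk the chain between them. A sketch against stand-in interfaces (the real signatures take DataRefNode and IndexKey; names here are hypothetical):

// Sketch of bounding a scan with a finder: first/last candidate blocks
// delimit the walk, matching findFirstDataBlock/findLastDataBlock.
public class RangeScanSketch {
  interface Node {
    Node getNext();
    long nodeNumber();
  }

  interface Finder {
    Node findFirst(Node root, byte[] startKey);
    Node findLast(Node root, byte[] endKey);
  }

  static void scanRange(Finder finder, Node root, byte[] startKey, byte[] endKey) {
    Node last = finder.findLast(root, endKey);
    if (last == null) {
      return;                 // no block matches the range
    }
    for (Node n = finder.findFirst(root, startKey); n != null; n = n.getNext()) {
      // process block n here (read dimension/measure chunks, apply filter)
      if (n.nodeNumber() == last.nodeNumber()) {
        break;                // reached the last candidate block
      }
    }
  }
}
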
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/IndexKey.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/IndexKey.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/IndexKey.java
new file mode 100644
index 0000000..cefd32c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/IndexKey.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore;
+
+/**
+ * Index class to store the index of the segment blocklet infos
+ */
+public class IndexKey {
+
+  /**
+   * key which is generated from key generator
+   */
+  private byte[] dictionaryKeys;
+
+  /**
+   * key which was not generated using key generator
+   * <Index of FirstKey (2 bytes)><Index of SecondKey (2 bytes)><Index of NKey (2 bytes)>
+   * <First Key ByteArray><2nd Key ByteArray><N Key ByteArray>
+   */
+  private byte[] noDictionaryKeys;
+
+  public IndexKey(byte[] dictionaryKeys, byte[] noDictionaryKeys) {
+    this.dictionaryKeys = dictionaryKeys;
+    this.noDictionaryKeys = noDictionaryKeys;
+    if (null == dictionaryKeys) {
+      this.dictionaryKeys = new byte[0];
+    }
+    if (null == noDictionaryKeys) {
+      this.noDictionaryKeys = new byte[0];
+    }
+  }
+
+  /**
+   * @return the dictionaryKeys
+   */
+  public byte[] getDictionaryKeys() {
+    return dictionaryKeys;
+  }
+
+  /**
+   * @return the noDictionaryKeys
+   */
+  public byte[] getNoDictionaryKeys() {
+    return noDictionaryKeys;
+  }
+}

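A small usage sketch for the constructor above (assumes the class is on the classpath; note the null part is normalized to an empty array, so callers without no-dictionary columns can simply pass null):

import org.apache.carbondata.core.carbon.datastore.IndexKey;

public class IndexKeySketch {
  public static void main(String[] args) {
    byte[] dictionaryPart = new byte[] {0, 0, 0, 1};  // e.g. from the key generator
    IndexKey startKey = new IndexKey(dictionaryPart, null);
    // the missing no-dictionary part comes back as an empty array, not null
    System.out.println(startKey.getNoDictionaryKeys().length);  // prints 0
  }
}
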
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/SegmentTaskIndexStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/SegmentTaskIndexStore.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/SegmentTaskIndexStore.java
new file mode 100644
index 0000000..50d462a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/SegmentTaskIndexStore.java
@@ -0,0 +1,334 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.AbstractIndex;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentTaskIndex;
+import org.apache.carbondata.core.carbon.datastore.block.TableBlockInfo;
+import org.apache.carbondata.core.carbon.datastore.exception.IndexBuilderException;
+import org.apache.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
+import org.apache.carbondata.core.carbon.path.CarbonTablePath.DataFileUtil;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.CarbonUtilException;
+
+/**
+ * Singleton class to handle loading, unloading, clearing and storing of the
+ * table blocks
+ */
+public class SegmentTaskIndexStore {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(SegmentTaskIndexStore.class.getName());
+  /**
+   * singleton instance
+   */
+  private static final SegmentTaskIndexStore SEGMENTTASKINDEXSTORE = new SegmentTaskIndexStore();
+
+  /**
+   * mapping of table identifier to a map of segmentId_taskId to table segment.
+   * The reason for so many maps is that each segment can have multiple data
+   * files, and each file will have its own btree
+   */
+  private Map<AbsoluteTableIdentifier, Map<String, Map<String, AbstractIndex>>> tableSegmentMap;
+
+  /**
+   * map of segment id to lock object. While loading the btree this map is filled,
+   * and the entry is removed after the tree for that segment is loaded. This is
+   * useful while loading trees concurrently: only a segment level lock is
+   * applied, so another segment can be loaded concurrently
+   */
+  private Map<String, Object> segmentLockMap;
+
+  /**
+   * map of table to its lock object. This is useful in the concurrent
+   * query scenario: when more than one query comes for the same table,
+   * it ensures that only one query will be able to load the blocks
+   */
+  private Map<AbsoluteTableIdentifier, Object> tableLockMap;
+
+  private SegmentTaskIndexStore() {
+    tableSegmentMap =
+        new ConcurrentHashMap<AbsoluteTableIdentifier, Map<String, Map<String, AbstractIndex>>>(
+            CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    tableLockMap = new ConcurrentHashMap<AbsoluteTableIdentifier, Object>(
+        CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    segmentLockMap = new ConcurrentHashMap<String, Object>();
+  }
+
+  /**
+   * Return the instance of this class
+   *
+   * @return singleton instance
+   */
+  public static SegmentTaskIndexStore getInstance() {
+    return SEGMENTTASKINDEXSTORE;
+  }
+
+  /**
+   * Below method will be used to load the segments.
+   * One segment may have multiple tasks, so the table segment will be loaded
+   * based on task id, and the method will return the map of taskId to table
+   * segment map
+   *
+   * @param segmentToTableBlocksInfos segment id to block info
+   * @param absoluteTableIdentifier   absolute table identifier
+   * @return map of task id to segment mapping
+   * @throws IndexBuilderException
+   */
+  public Map<String, AbstractIndex> loadAndGetTaskIdToSegmentsMap(
+      Map<String, List<TableBlockInfo>> segmentToTableBlocksInfos,
+      AbsoluteTableIdentifier absoluteTableIdentifier) throws IndexBuilderException {
+    // task id to segment map
+    Map<String, AbstractIndex> taskIdToTableSegmentMap =
+        new HashMap<String, AbstractIndex>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    addLockObject(absoluteTableIdentifier);
+    Iterator<Entry<String, List<TableBlockInfo>>> iteratorOverSegmentBlocksInfos =
+        segmentToTableBlocksInfos.entrySet().iterator();
+    Map<String, Map<String, AbstractIndex>> tableSegmentMapTemp =
+        addTableSegmentMap(absoluteTableIdentifier);
+    Map<String, AbstractIndex> taskIdToSegmentIndexMap = null;
+    String segmentId = null;
+    String taskId = null;
+    try {
+      while (iteratorOverSegmentBlocksInfos.hasNext()) {
+        // segment id to table block mapping
+        Entry<String, List<TableBlockInfo>> next = iteratorOverSegmentBlocksInfos.next();
+        // group task id to table block info mapping for the segment
+        Map<String, List<TableBlockInfo>> taskIdToTableBlockInfoMap =
+            mappedAndGetTaskIdToTableBlockInfo(segmentToTableBlocksInfos);
+        // get the existing map of task id to table segment map
+        segmentId = next.getKey();
+        // check if the segment is already loaded; if so,
+        // no need to load the segment blocks
+        taskIdToSegmentIndexMap = tableSegmentMapTemp.get(segmentId);
+        if (taskIdToSegmentIndexMap == null) {
+          // get the segment loader lock object; this is to avoid the
+          // same segment getting loaded multiple times
+          // in case of concurrent query
+          Object segmentLoderLockObject = segmentLockMap.get(segmentId);
+          if (null == segmentLoderLockObject) {
+            segmentLoderLockObject = addAndGetSegmentLock(segmentId);
+          }
+          // acquire lock to load the segment
+          synchronized (segmentLoderLockObject) {
+            taskIdToSegmentIndexMap = tableSegmentMapTemp.get(segmentId);
+            if (null == taskIdToSegmentIndexMap) {
+              // creating a map of task id to table segment
+              taskIdToSegmentIndexMap = new HashMap<String, AbstractIndex>();
+              Iterator<Entry<String, List<TableBlockInfo>>> iterator =
+                  taskIdToTableBlockInfoMap.entrySet().iterator();
+              while (iterator.hasNext()) {
+                Entry<String, List<TableBlockInfo>> taskToBlockInfoList = iterator.next();
+                taskId = taskToBlockInfoList.getKey();
+                taskIdToSegmentIndexMap.put(taskId,
+                    loadBlocks(taskId, taskToBlockInfoList.getValue(), absoluteTableIdentifier));
+              }
+              tableSegmentMapTemp.put(next.getKey(), taskIdToSegmentIndexMap);
+              // removing from segment lock map: once the segment is loaded,
+              // if a concurrent query comes for the same segment
+              // it will wait on the lock, and after that the segment will
+              // already be loaded, so the lock is not required. That is why
+              // the lock object is removed, as it won't be useful
+              segmentLockMap.remove(segmentId);
+            }
+          }
+          taskIdToTableSegmentMap.putAll(taskIdToSegmentIndexMap);
+        }
+      }
+    } catch (CarbonUtilException e) {
+      LOGGER.error("Problem while loading the segment");
+      throw new IndexBuilderException(e);
+    }
+    return taskIdToTableSegmentMap;
+  }
+
+  /**
+   * Below method will be used to get the segment level lock object
+   *
+   * @param segmentId
+   * @return lock object
+   */
+  private synchronized Object addAndGetSegmentLock(String segmentId) {
+    // get the segment lock object if it is present then return
+    // otherwise add the new lock and return
+    Object segmentLoderLockObject = segmentLockMap.get(segmentId);
+    if (null == segmentLoderLockObject) {
+      segmentLoderLockObject = new Object();
+      segmentLockMap.put(segmentId, segmentLoderLockObject);
+    }
+    return segmentLoderLockObject;
+  }
+
+  /**
+   * Below code is to add a table level lock object to the table lock map
+   * if it is not already present
+   *
+   * @param absoluteTableIdentifier
+   */
+  private synchronized void addLockObject(AbsoluteTableIdentifier absoluteTableIdentifier) {
+    // add the instance to lock map if it is not present
+    if (null == tableLockMap.get(absoluteTableIdentifier)) {
+      tableLockMap.put(absoluteTableIdentifier, new Object());
+    }
+  }
+
+  /**
+   * Below method will be used to get the table segment map
+   * if table segment is not present then it will add and return
+   *
+   * @param absoluteTableIdentifier
+   * @return table segment map
+   */
+  private Map<String, Map<String, AbstractIndex>> addTableSegmentMap(
+      AbsoluteTableIdentifier absoluteTableIdentifier) {
+    // get the instance of lock object
+    Object lockObject = tableLockMap.get(absoluteTableIdentifier);
+    Map<String, Map<String, AbstractIndex>> tableSegmentMapTemp =
+        tableSegmentMap.get(absoluteTableIdentifier);
+    if (null == tableSegmentMapTemp) {
+      synchronized (lockObject) {
+        // segment id to task id to table segment map
+        tableSegmentMapTemp = tableSegmentMap.get(absoluteTableIdentifier);
+        if (null == tableSegmentMapTemp) {
+          tableSegmentMapTemp = new ConcurrentHashMap<String, Map<String, AbstractIndex>>();
+          tableSegmentMap.put(absoluteTableIdentifier, tableSegmentMapTemp);
+        }
+      }
+    }
+    return tableSegmentMapTemp;
+  }
+
+  /**
+   * Below method will be used to load the blocks
+   *
+   * @param tableBlockInfoList
+   * @return loaded segment
+   * @throws CarbonUtilException
+   */
+  private AbstractIndex loadBlocks(String taskId, List<TableBlockInfo> tableBlockInfoList,
+      AbsoluteTableIdentifier tableIdentifier) throws CarbonUtilException {
+    // all the blocks of one task id will be loaded together,
+    // so creating a list which will have all the data file metadata of one task
+    List<DataFileFooter> footerList =
+        CarbonUtil.readCarbonIndexFile(taskId, tableBlockInfoList, tableIdentifier);
+    AbstractIndex segment = new SegmentTaskIndex();
+    // file path of only the first block is passed, as the paths of all table
+    // block infos with the same task id will be the same
+    segment.buildIndex(footerList);
+    return segment;
+  }
+
+  /**
+   * Below method will be used to get the mapping from task id to all the
+   * table block infos belonging to that task
+   *
+   * @param segmentToTableBlocksInfos segment id to table blocks info map
+   * @return task id to table block info mapping
+   */
+  private Map<String, List<TableBlockInfo>> mappedAndGetTaskIdToTableBlockInfo(
+      Map<String, List<TableBlockInfo>> segmentToTableBlocksInfos) {
+    Map<String, List<TableBlockInfo>> taskIdToTableBlockInfoMap =
+        new HashMap<String, List<TableBlockInfo>>();
+    Iterator<Entry<String, List<TableBlockInfo>>> iterator =
+        segmentToTableBlocksInfos.entrySet().iterator();
+    while (iterator.hasNext()) {
+      Entry<String, List<TableBlockInfo>> next = iterator.next();
+      List<TableBlockInfo> value = next.getValue();
+      for (TableBlockInfo blockInfo : value) {
+        String taskNo = DataFileUtil.getTaskNo(blockInfo.getFilePath());
+        List<TableBlockInfo> list = taskIdToTableBlockInfoMap.get(taskNo);
+        if (null == list) {
+          list = new ArrayList<TableBlockInfo>();
+          taskIdToTableBlockInfoMap.put(taskNo, list);
+        }
+        list.add(blockInfo);
+      }
+
+    }
+    return taskIdToTableBlockInfoMap;
+  }
+
+  /**
+   * remove all the details of a table; this will be used in case of drop table
+   *
+   * @param absoluteTableIdentifier absolute table identifier to find the table
+   */
+  public void clear(AbsoluteTableIdentifier absoluteTableIdentifier) {
+    // removing all the details of table
+    tableLockMap.remove(absoluteTableIdentifier);
+    tableSegmentMap.remove(absoluteTableIdentifier);
+  }
+
+  /**
+   * Below method will be used to remove the segment blocks based on
+   * the segment ids passed
+   *
+   * @param segmentToBeRemoved      segment to be removed
+   * @param absoluteTableIdentifier absoluteTableIdentifier
+   */
+  public void removeTableBlocks(List<String> segmentToBeRemoved,
+      AbsoluteTableIdentifier absoluteTableIdentifier) {
+    // get the lock object; if it is not present then the table was
+    // never loaded at all and we can simply return
+    Object lockObject = tableLockMap.get(absoluteTableIdentifier);
+    if (null == lockObject) {
+      return;
+    }
+    // remove only those segments which were loaded
+    Map<String, Map<String, AbstractIndex>> map = tableSegmentMap.get(absoluteTableIdentifier);
+    // if there is no loaded blocks then return
+    if (null == map) {
+      return;
+    }
+    for (String segmentId : segmentToBeRemoved) {
+      map.remove(segmentId);
+    }
+  }
+
+  /**
+   * Below method will be used to check whether the blocks of a segment
+   * are already loaded
+   *
+   * @param absoluteTableIdentifier
+   * @param segmentId
+   * @return the loaded blocks if present, otherwise null
+   */
+  public Map<String, AbstractIndex> getSegmentBTreeIfExists(
+      AbsoluteTableIdentifier absoluteTableIdentifier, String segmentId) {
+    Map<String, Map<String, AbstractIndex>> tableSegment =
+        tableSegmentMap.get(absoluteTableIdentifier);
+    if (null == tableSegment) {
+      return null;
+    }
+    return tableSegment.get(segmentId);
+  }
+}

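The map-loading code above follows a per-segment double-checked locking pattern: check the cache, take a lock private to one segment, re-check under the lock, load, then discard the lock object. A minimal, self-contained sketch of that pattern follows; SegmentCache, the String payload and expensiveLoad are hypothetical stand-ins for illustration, not part of this commit.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class SegmentCache {

  private final Map<String, String> loadedSegments = new ConcurrentHashMap<String, String>();
  private final Map<String, Object> segmentLockMap = new ConcurrentHashMap<String, Object>();

  public String getOrLoad(String segmentId) {
    String segment = loadedSegments.get(segmentId);
    if (segment == null) {
      // one lock object per segment, so unrelated segments can load in parallel
      Object lock = segmentLockMap.computeIfAbsent(segmentId, k -> new Object());
      synchronized (lock) {
        // re-check: another thread may have loaded it while we waited
        segment = loadedSegments.get(segmentId);
        if (segment == null) {
          segment = expensiveLoad(segmentId);
          loadedSegments.put(segmentId, segment);
        }
      }
      // once loaded the lock object is no longer useful, so drop it
      segmentLockMap.remove(segmentId);
    }
    return segment;
  }

  private String expensiveLoad(String segmentId) {
    // placeholder for the real index build
    return "btree-for-" + segmentId;
  }
}

The same trade-off applies as in the commit: removing the lock object keeps the map small, at the cost of an occasional duplicate lock allocation under contention.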
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/AbstractIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/AbstractIndex.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/AbstractIndex.java
new file mode 100644
index 0000000..7e1ed8c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/AbstractIndex.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.block;
+
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.DataRefNode;
+import org.apache.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
+
+public abstract class AbstractIndex {
+
+  /**
+   * value object which will hold the restructure information of the block
+   */
+  protected SegmentProperties segmentProperties;
+
+  /**
+   * data block
+   */
+  protected DataRefNode dataRefNode;
+
+  /**
+   * total number of rows present in the block
+   */
+  protected long totalNumberOfRows;
+
+  /**
+   * @return the totalNumberOfRows
+   */
+  public long getTotalNumberOfRows() {
+    return totalNumberOfRows;
+  }
+
+  /**
+   * @return the segmentProperties
+   */
+  public SegmentProperties getSegmentProperties() {
+    return segmentProperties;
+  }
+
+  /**
+   * @return the dataBlock
+   */
+  public DataRefNode getDataRefNode() {
+    return dataRefNode;
+  }
+
+  /**
+   * Below method will be used to build the index from the data file footers
+   *
+   * @param footerList data file footer list
+   */
+  public abstract void buildIndex(List<DataFileFooter> footerList);
+}

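AbstractIndex is a small template: subclasses supply buildIndex, and callers consume the three getters. A hedged sketch of the smallest conforming subclass, only to show the contract (BlockIndex and SegmentTaskIndex below are the real implementations):

import java.util.List;

import org.apache.carbondata.core.carbon.datastore.block.AbstractIndex;
import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
import org.apache.carbondata.core.carbon.metadata.blocklet.DataFileFooter;

public class MinimalIndex extends AbstractIndex {

  // fills only the fields every index must provide; dataRefNode is left
  // null in this sketch, a real index must build and set it
  public void buildIndex(List<DataFileFooter> footerList) {
    segmentProperties = new SegmentProperties(footerList.get(0).getColumnInTable(),
        footerList.get(0).getSegmentInfo().getColumnCardinality());
    totalNumberOfRows = footerList.get(0).getNumberOfRows();
  }
}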
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/BlockIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/BlockIndex.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/BlockIndex.java
new file mode 100644
index 0000000..cfdb127
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/BlockIndex.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.block;
+
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.BTreeBuilderInfo;
+import org.apache.carbondata.core.carbon.datastore.BtreeBuilder;
+import org.apache.carbondata.core.carbon.datastore.impl.btree.BlockletBTreeBuilder;
+import org.apache.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
+
+/**
+ * Class which is responsible for loading the b+ tree block. This class will
+ * persist all the details of a table block
+ */
+public class BlockIndex extends AbstractIndex {
+
+  /**
+   * Below method will be used to build the blocklet level index from the
+   * data file footers
+   *
+   * @param footerList data file footer list
+   */
+  public void buildIndex(List<DataFileFooter> footerList) {
+    // create the segment metadata details;
+    // these will be useful in query handling
+    segmentProperties = new SegmentProperties(footerList.get(0).getColumnInTable(),
+        footerList.get(0).getSegmentInfo().getColumnCardinality());
+    // create a segment builder info
+    BTreeBuilderInfo indexBuilderInfo =
+        new BTreeBuilderInfo(footerList, segmentProperties.getDimensionColumnsValueSize());
+    BtreeBuilder blocksBuilder = new BlockletBTreeBuilder();
+    // load the metadata
+    blocksBuilder.build(indexBuilderInfo);
+    dataRefNode = blocksBuilder.get();
+    totalNumberOfRows = footerList.get(0).getNumberOfRows();
+  }
+}

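Usage is two steps: construct and build. A hedged sketch, assuming the footers were obtained elsewhere (for example via the CarbonUtil.readCarbonIndexFile call shown earlier in this commit):

import java.util.List;

import org.apache.carbondata.core.carbon.datastore.DataRefNode;
import org.apache.carbondata.core.carbon.datastore.block.BlockIndex;
import org.apache.carbondata.core.carbon.metadata.blocklet.DataFileFooter;

public class BlockIndexUsage {

  // footers: data file footers of one carbon data file, obtained elsewhere
  public static DataRefNode rootOf(List<DataFileFooter> footers) {
    BlockIndex index = new BlockIndex();
    index.buildIndex(footers);
    // the root of the blocklet btree, ready for scanning
    return index.getDataRefNode();
  }
}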
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/Distributable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/Distributable.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/Distributable.java
new file mode 100644
index 0000000..817aafc
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/Distributable.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.block;
+
+/**
+ * Abstract class which maintains the locations (hosts) of a node.
+ */
+public abstract class Distributable implements Comparable<Distributable> {
+
+  public abstract String[] getLocations();
+}

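getLocations() exists so a scheduler can co-locate work with data. A hedged sketch of one way a driver might bucket blocks by host before assigning splits (the grouping policy here is illustrative, not the one CarbonData uses):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.carbondata.core.carbon.datastore.block.Distributable;

public class LocalityGrouping {

  // buckets each block under every host that holds a replica of it
  public static Map<String, List<Distributable>> groupByHost(List<Distributable> blocks) {
    Map<String, List<Distributable>> byHost = new HashMap<String, List<Distributable>>();
    for (Distributable block : blocks) {
      for (String host : block.getLocations()) {
        List<Distributable> list = byHost.get(host);
        if (list == null) {
          list = new ArrayList<Distributable>();
          byHost.put(host, list);
        }
        list.add(block);
      }
    }
    return byHost;
  }
}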
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/SegmentProperties.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/SegmentProperties.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/SegmentProperties.java
new file mode 100644
index 0000000..816ca3a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/SegmentProperties.java
@@ -0,0 +1,748 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.block;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnGroupModel;
+import org.apache.carbondata.core.keygenerator.KeyGenerator;
+import org.apache.carbondata.core.keygenerator.columnar.ColumnarSplitter;
+import org.apache.carbondata.core.keygenerator.columnar.impl.MultiDimKeyVarLengthVariableSplitGenerator;
+import org.apache.carbondata.core.keygenerator.mdkey.MultiDimKeyVarLengthGenerator;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+import org.apache.commons.lang3.ArrayUtils;
+
+/**
+ * This class contains all the restructure information of the block,
+ * which will be used during query execution to handle restructuring
+ */
+public class SegmentProperties {
+
+  /**
+   * key generator of the block which was used to generate the mdkey for
+   * normal dimensions; this will be required during query execution
+  private KeyGenerator dimensionKeyGenerator;
+
+  /**
+   * list of dimension present in the block
+   */
+  private List<CarbonDimension> dimensions;
+
+  /**
+   * list of dimension present in the block
+   */
+  private List<CarbonDimension> complexDimensions;
+
+  /**
+   * list of measure present in the block
+   */
+  private List<CarbonMeasure> measures;
+
+  /**
+   * cardinality of dimension columns participated in key generator
+   */
+  private int[] dimColumnsCardinality;
+
+  /**
+   * cardinality of complex dimension
+   */
+  private int[] complexDimColumnCardinality;
+
+  /**
+   * mapping of dimension column ordinal to block in a file; this will be
+   * used for reading the blocks from the file
+   */
+  private Map<Integer, Integer> dimensionOrdinalToBlockMapping;
+
+  /**
+   * a block can have multiple columns. This will have the block index as key
+   * and all dimension ordinals participating in that block as values
+   */
+  private Map<Integer, Set<Integer>> blockTodimensionOrdinalMapping;
+
+  /**
+   * mapping of measure column ordinal to block in a file; this will be used
+   * while reading the block from the file
+   */
+  private Map<Integer, Integer> measuresOrdinalToBlockMapping;
+
+  /**
+   * size of each dimension column value in a block; this can be used when
+   * we need to copy a cell value to create a tuple. For a no dictionary
+   * column this value will be -1, while for a dictionary column the size
+   * of the value is fixed
+   */
+  private int[] eachDimColumnValueSize;
+
+  /**
+   * size of each complex dimension column value in a block; this can be used
+   * when we need to copy a cell value to create a tuple. For a no dictionary
+   * column this value will be -1, while for a dictionary column the size
+   * of the value is fixed
+   */
+  private int[] eachComplexDimColumnValueSize;
+
+  /**
+   * below map will hold the mapping of a column group id to the key
+   * generator of that group; e.g. if 3 dimensions with ordinals 2,3,4 are
+   * present in column group id 0, the map will contain 0 -> keygen({2,3,4})
+   */
+  private Map<Integer, KeyGenerator> columnGroupAndItsKeygenartor;
+
+  /**
+   * inside a column group the key generator index of a dimension will not be
+   * the same as its ordinal; this holds, per column group, the mapping from
+   * dimension ordinal to mdkey index
+   */
+  private Map<Integer, Map<Integer, Integer>> columnGroupOrdinalToMdkeymapping;
+
+  /**
+   * this will be used to split the fixed length key; it holds all the
+   * information about how the key was created and how to split it
+   * based on the column groups
+   */
+  private ColumnarSplitter fixedLengthKeySplitter;
+
+  /**
+   * number of no dictionary dimensions; this will be used during query
+   * execution for creating the start and end keys. It is stored here so
+   * that it does not have to be recalculated for every query
+   */
+  private int numberOfNoDictionaryDimension;
+
+  /**
+   * column group model
+   */
+  private ColumnGroupModel colGroupModel;
+
+  public SegmentProperties(List<ColumnSchema> columnsInTable, int[] columnCardinality) {
+    dimensions = new ArrayList<CarbonDimension>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    complexDimensions =
+        new ArrayList<CarbonDimension>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    measures = new ArrayList<CarbonMeasure>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    fillDimensionAndMeasureDetails(columnsInTable, columnCardinality);
+    dimensionOrdinalToBlockMapping =
+        new HashMap<Integer, Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    blockTodimensionOrdinalMapping =
+        new HashMap<Integer, Set<Integer>>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    measuresOrdinalToBlockMapping =
+        new HashMap<Integer, Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    initialiseColGroups(columnsInTable);
+    fillOrdinalToBlockMappingForDimension();
+    fillOrdinalToBlockIndexMappingForMeasureColumns();
+    fillColumnGroupAndItsCardinality(columnCardinality);
+    fillKeyGeneratorDetails();
+  }
+
+  /**
+   * it fills the column groups,
+   * e.g. {{1},{2,3,4},{5},{6},{7,8,9}}, where each inner group holds the
+   * ordinals of dictionary dimensions sharing one column group
+   *
+   * @param columnsInTable
+   */
+  private void initialiseColGroups(List<ColumnSchema> columnsInTable) {
+    List<List<Integer>> colGrpList = new ArrayList<List<Integer>>();
+    List<Integer> group = new ArrayList<Integer>();
+    for (int i = 0; i < dimensions.size(); i++) {
+      CarbonDimension dimension = dimensions.get(i);
+      if (!dimension.hasEncoding(Encoding.DICTIONARY)) {
+        continue;
+      }
+      group.add(dimension.getOrdinal());
+      if (i < dimensions.size() - 1) {
+        int currGroupOrdinal = dimension.columnGroupId();
+        int nextGroupOrdinal = dimensions.get(i + 1).columnGroupId();
+        if (!(currGroupOrdinal == nextGroupOrdinal && currGroupOrdinal != -1)) {
+          colGrpList.add(group);
+          group = new ArrayList<Integer>();
+        }
+      } else {
+        colGrpList.add(group);
+      }
+
+    }
+    int[][] colGroups = new int[colGrpList.size()][];
+    for (int i = 0; i < colGroups.length; i++) {
+      colGroups[i] = new int[colGrpList.get(i).size()];
+      for (int j = 0; j < colGroups[i].length; j++) {
+        colGroups[i][j] = colGrpList.get(i).get(j);
+      }
+    }
+    this.colGroupModel = CarbonUtil.getColGroupModel(colGroups);
+  }
+
+  /**
+   * below method is to fill the mapping of dimensions to file blocks; all
+   * the columns of one column group will point to the same block
+   */
+  private void fillOrdinalToBlockMappingForDimension() {
+    int blockOrdinal = -1;
+    CarbonDimension dimension = null;
+    int index = 0;
+    int prvcolumnGroupId = -1;
+    while (index < dimensions.size()) {
+      dimension = dimensions.get(index);
+      // if the column group id is same as the previous one then the block
+      // index will be the same
+      if (dimension.isColumnar() || dimension.columnGroupId() != prvcolumnGroupId) {
+        blockOrdinal++;
+      }
+      dimensionOrdinalToBlockMapping.put(dimension.getOrdinal(), blockOrdinal);
+      prvcolumnGroupId = dimension.columnGroupId();
+      index++;
+    }
+    index = 0;
+    // complex dimension will be stored at last
+    while (index < complexDimensions.size()) {
+      dimension = complexDimensions.get(index);
+      dimensionOrdinalToBlockMapping.put(dimension.getOrdinal(), ++blockOrdinal);
+      blockOrdinal = fillComplexDimensionChildBlockIndex(blockOrdinal, dimension);
+      index++;
+    }
+    fillBlockToDimensionOrdinalMapping();
+  }
+
+  /**
+   * builds the reverse mapping from block index to the dimension ordinals
+   * present in that block
+   */
+  private void fillBlockToDimensionOrdinalMapping() {
+    Set<Entry<Integer, Integer>> blocks = dimensionOrdinalToBlockMapping.entrySet();
+    Iterator<Entry<Integer, Integer>> blockItr = blocks.iterator();
+    while (blockItr.hasNext()) {
+      Entry<Integer, Integer> block = blockItr.next();
+      Set<Integer> dimensionOrdinals = blockTodimensionOrdinalMapping.get(block.getValue());
+      if (dimensionOrdinals == null) {
+        dimensionOrdinals = new HashSet<Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+        blockTodimensionOrdinalMapping.put(block.getValue(), dimensionOrdinals);
+      }
+      dimensionOrdinals.add(block.getKey());
+    }
+  }
+
+  /**
+   * Below method will be used to assign block indexes to the children of a
+   * complex dimension. It is a recursive method which walks the children
+   * and assigns each of them a block index
+   *
+   * @param blockOrdinal start block ordinal
+   * @param dimension    parent dimension
+   * @return last block index
+   */
+  private int fillComplexDimensionChildBlockIndex(int blockOrdinal, CarbonDimension dimension) {
+    for (int i = 0; i < dimension.numberOfChild(); i++) {
+      dimensionOrdinalToBlockMapping
+          .put(dimension.getListOfChildDimensions().get(i).getOrdinal(), ++blockOrdinal);
+      if (dimension.getListOfChildDimensions().get(i).numberOfChild() > 0) {
+        blockOrdinal = fillComplexDimensionChildBlockIndex(blockOrdinal,
+            dimension.getListOfChildDimensions().get(i));
+      }
+    }
+    return blockOrdinal;
+  }
+
+  /**
+   * Below method will be used to fill the mapping
+   * of measure ordinal to its block index mapping in
+   * file
+   */
+  private void fillOrdinalToBlockIndexMappingForMeasureColumns() {
+    int blockOrdinal = 0;
+    int index = 0;
+    while (index < measures.size()) {
+      measuresOrdinalToBlockMapping.put(measures.get(index).getOrdinal(), blockOrdinal);
+      blockOrdinal++;
+      index++;
+    }
+  }
+
+  /**
+   * below method will fill the dimension and measure details of the block.
+   *
+   * @param columnsInTable
+   * @param columnCardinality
+   */
+  private void fillDimensionAndMeasureDetails(List<ColumnSchema> columnsInTable,
+      int[] columnCardinality) {
+    ColumnSchema columnSchema = null;
+    // ordinal will be required to read the data from file block
+    int dimensonOrdinal = 0;
+    int measureOrdinal = -1;
+    // table ordinal is actually a schema ordinal; this is required as the
+    // cardinality array stored in the segment info contains -1 if that
+    // particular column is not a dictionary column
+    int tableOrdinal = -1;
+    // creating a list as we do not know upfront how many dimensions
+    // participate in the mdkey
+    List<Integer> cardinalityIndexForNormalDimensionColumn =
+        new ArrayList<Integer>(columnsInTable.size());
+    // creating a list as we do not know upfront how many complex dimensions
+    // are present
+    List<Integer> cardinalityIndexForComplexDimensionColumn =
+        new ArrayList<Integer>(columnsInTable.size());
+    boolean isComplexDimensionStarted = false;
+    CarbonDimension carbonDimension = null;
+    // to store the position of dimension in surrogate key array which is
+    // participating in mdkey
+    int keyOrdinal = 0;
+    int previousColumnGroup = -1;
+    // to store the ordinal of the column group ordinal
+    int columnGroupOrdinal = 0;
+    int counter = 0;
+    int complexTypeOrdinal = 0;
+    while (counter < columnsInTable.size()) {
+      columnSchema = columnsInTable.get(counter);
+      if (columnSchema.isDimensionColumn()) {
+        tableOrdinal++;
+        // not adding the cardinality of the no dictionary
+        // column as it is not part of the mdkey
+        if (CarbonUtil.hasEncoding(columnSchema.getEncodingList(), Encoding.DICTIONARY)
+            && !isComplexDimensionStarted && columnSchema.getNumberOfChild() == 0) {
+          cardinalityIndexForNormalDimensionColumn.add(tableOrdinal);
+          if (columnSchema.isColumnar()) {
+            // if it is a columnar dimension participated in mdkey then added
+            // key ordinal and dimension ordinal
+            carbonDimension =
+                new CarbonDimension(columnSchema, dimensonOrdinal++, keyOrdinal++, -1, -1);
+          } else {
+            // if not columnar then it is a column group dimension
+
+            // below code to handle first dimension of the column group
+            // in this case ordinal of the column group will be 0
+            if (previousColumnGroup != columnSchema.getColumnGroupId()) {
+              columnGroupOrdinal = 0;
+              carbonDimension = new CarbonDimension(columnSchema, dimensonOrdinal++, keyOrdinal++,
+                  columnGroupOrdinal++, -1);
+            }
+            // if the previous dimension's column group id is same as the
+            // current one then it belongs to the same column group
+            else {
+              carbonDimension = new CarbonDimension(columnSchema, dimensonOrdinal++, keyOrdinal++,
+                  columnGroupOrdinal++, -1);
+            }
+            previousColumnGroup = columnSchema.getColumnGroupId();
+          }
+        }
+        // as complex types are stored at the end, once a complex type has
+        // started all the remaining dimensions are added as complex
+        else if (isComplexDimensionStarted || CarbonUtil.hasDataType(columnSchema.getDataType(),
+            new DataType[] { DataType.ARRAY, DataType.STRUCT })) {
+          cardinalityIndexForComplexDimensionColumn.add(tableOrdinal);
+          carbonDimension =
+              new CarbonDimension(columnSchema, dimensonOrdinal++, -1, -1, complexTypeOrdinal++);
+          carbonDimension.initializeChildDimensionsList(columnSchema.getNumberOfChild());
+          complexDimensions.add(carbonDimension);
+          isComplexDimensionStarted = true;
+          int previouseOrdinal = dimensonOrdinal;
+          dimensonOrdinal =
+              readAllComplexTypeChildrens(dimensonOrdinal, columnSchema.getNumberOfChild(),
+                  columnsInTable, carbonDimension, complexTypeOrdinal);
+          int numberOfChildrenDimensionAdded = dimensonOrdinal - previouseOrdinal;
+          for (int i = 0; i < numberOfChildrenDimensionAdded; i++) {
+            cardinalityIndexForComplexDimensionColumn.add(++tableOrdinal);
+          }
+          counter = dimensonOrdinal;
+          complexTypeOrdinal = carbonDimension.getListOfChildDimensions()
+              .get(carbonDimension.getListOfChildDimensions().size() - 1).getComplexTypeOrdinal();
+          complexTypeOrdinal++;
+          continue;
+        } else {
+          // for no dictionary dimension
+          carbonDimension = new CarbonDimension(columnSchema, dimensonOrdinal++, -1, -1, -1);
+          numberOfNoDictionaryDimension++;
+        }
+        dimensions.add(carbonDimension);
+      } else {
+        measures.add(new CarbonMeasure(columnSchema, ++measureOrdinal));
+      }
+      counter++;
+    }
+    dimColumnsCardinality = new int[cardinalityIndexForNormalDimensionColumn.size()];
+    complexDimColumnCardinality = new int[cardinalityIndexForComplexDimensionColumn.size()];
+    int index = 0;
+    // filling the cardinality of the dimension column to create the key
+    // generator
+    for (Integer cardinalityArrayIndex : cardinalityIndexForNormalDimensionColumn) {
+      dimColumnsCardinality[index++] = columnCardinality[cardinalityArrayIndex];
+    }
+    index = 0;
+    // filling the cardinality of the complex dimension column to create the
+    // key generator
+    for (Integer cardinalityArrayIndex : cardinalityIndexForComplexDimensionColumn) {
+      complexDimColumnCardinality[index++] = columnCardinality[cardinalityArrayIndex];
+    }
+  }
+
+  /**
+   * Reads all primitive/complex children and sets them as the list of child
+   * carbon dimensions of the parent dimension
+   *
+   * @param dimensionOrdinal
+   * @param childCount
+   * @param listOfColumns
+   * @param parentDimension
+   * @return the next dimension ordinal
+   */
+  private int readAllComplexTypeChildrens(int dimensionOrdinal, int childCount,
+      List<ColumnSchema> listOfColumns, CarbonDimension parentDimension,
+      int complexDimensionOrdinal) {
+    for (int i = 0; i < childCount; i++) {
+      ColumnSchema columnSchema = listOfColumns.get(dimensionOrdinal);
+      if (columnSchema.isDimensionColumn()) {
+        if (columnSchema.getNumberOfChild() > 0) {
+          CarbonDimension complexDimension =
+              new CarbonDimension(columnSchema, dimensionOrdinal++, -1, -1,
+                  complexDimensionOrdinal++);
+          complexDimension.initializeChildDimensionsList(columnSchema.getNumberOfChild());
+          parentDimension.getListOfChildDimensions().add(complexDimension);
+          dimensionOrdinal =
+              readAllComplexTypeChildrens(dimensionOrdinal, columnSchema.getNumberOfChild(),
+                  listOfColumns, complexDimension, complexDimensionOrdinal);
+        } else {
+          parentDimension.getListOfChildDimensions().add(
+              new CarbonDimension(columnSchema, dimensionOrdinal++, -1, -1,
+                  complexDimensionOrdinal++));
+        }
+      }
+    }
+    return dimensionOrdinal;
+  }
+
+  /**
+   * Below method will fill the key generator details of both types of key
+   * generator. This will be required during both query execution and
+   * data loading.
+   */
+  private void fillKeyGeneratorDetails() {
+    // create a dimension partitioner list; this list will contain
+    // information about how dimension values are stored:
+    // grouped or individually
+    List<Integer> dimensionPartitionList =
+        new ArrayList<Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    List<Boolean> isDictionaryColumn =
+        new ArrayList<Boolean>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    int prvcolumnGroupId = -1;
+    int counter = 0;
+    while (counter < dimensions.size()) {
+      CarbonDimension carbonDimension = dimensions.get(counter);
+      // if dimension is not a part of mdkey then no need to add
+      if (!carbonDimension.getEncoder().contains(Encoding.DICTIONARY)) {
+        isDictionaryColumn.add(false);
+        counter++;
+        continue;
+      }
+      // columnar column is stored individually
+      // so add one
+      if (carbonDimension.isColumnar()) {
+        dimensionPartitionList.add(1);
+        isDictionaryColumn.add(true);
+      }
+      // if in a group then we need to add how many columns are
+      // present in that group
+      if (!carbonDimension.isColumnar() && carbonDimension.columnGroupId() == prvcolumnGroupId) {
+        // incrementing the previous value of the list as it is in same column group
+        dimensionPartitionList.set(dimensionPartitionList.size() - 1,
+            dimensionPartitionList.get(dimensionPartitionList.size() - 1) + 1);
+      } else if (!carbonDimension.isColumnar()) {
+        dimensionPartitionList.add(1);
+        isDictionaryColumn.add(true);
+      }
+      prvcolumnGroupId = carbonDimension.columnGroupId();
+      counter++;
+    }
+    // get the partitioner
+    int[] dimensionPartitions = ArrayUtils
+        .toPrimitive(dimensionPartitionList.toArray(new Integer[dimensionPartitionList.size()]));
+    // get the bit length of each column
+    int[] bitLength = CarbonUtil.getDimensionBitLength(dimColumnsCardinality, dimensionPartitions);
+    // create a key generator
+    this.dimensionKeyGenerator = new MultiDimKeyVarLengthGenerator(bitLength);
+    this.fixedLengthKeySplitter =
+        new MultiDimKeyVarLengthVariableSplitGenerator(bitLength, dimensionPartitions);
+    // get the size of each value in file block
+    int[] dictionaryDimColumnValueSize = fixedLengthKeySplitter.getBlockKeySize();
+    int index = -1;
+    this.eachDimColumnValueSize = new int[isDictionaryColumn.size()];
+    for (int i = 0; i < eachDimColumnValueSize.length; i++) {
+      if (!isDictionaryColumn.get(i)) {
+        eachDimColumnValueSize[i] = -1;
+        continue;
+      }
+      eachDimColumnValueSize[i] = dictionaryDimColumnValueSize[++index];
+    }
+    if (complexDimensions.size() > 0) {
+      int[] complexDimensionPartition = new int[complexDimColumnCardinality.length];
+      // as complex dimensions are stored in columnar format, each one forms
+      // its own partition
+      Arrays.fill(complexDimensionPartition, 1);
+      bitLength =
+          CarbonUtil.getDimensionBitLength(complexDimColumnCardinality, complexDimensionPartition);
+      for (int i = 0; i < bitLength.length; i++) {
+        if (complexDimColumnCardinality[i] == 0) {
+          bitLength[i] = 64;
+        }
+      }
+      ColumnarSplitter keySplitter =
+          new MultiDimKeyVarLengthVariableSplitGenerator(bitLength, complexDimensionPartition);
+      eachComplexDimColumnValueSize = keySplitter.getBlockKeySize();
+    } else {
+      eachComplexDimColumnValueSize = new int[0];
+    }
+  }
+
+  /**
+   * Below method will be used to create a mapping of column group id to the
+   * cardinality of the dimensions present in that column group. This mapping
+   * will be used during query execution to create a mask key for the column
+   * group dimensions, which is needed in aggregation and filter queries,
+   * as column group dimensions are stored at the bit level
+   */
+  private void fillColumnGroupAndItsCardinality(int[] cardinality) {
+    // mapping of the column group and its ordinal
+    Map<Integer, List<Integer>> columnGroupAndOrdinalMapping =
+        new HashMap<Integer, List<Integer>>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    // to store a column group
+    List<Integer> currentColumnGroup = null;
+    // current index
+    int index = 0;
+    // previous column group id, to check whether all the columns
+    // of a group have been selected
+    int prvColumnGroupId = -1;
+    while (index < dimensions.size()) {
+      // if the dimension is not columnar and its column group id is same
+      // as the previous one, then we need to add the ordinal of that column
+      // as it belongs to the same column group
+      if (!dimensions.get(index).isColumnar()
+          && dimensions.get(index).columnGroupId() == prvColumnGroupId
+          && null != currentColumnGroup) {
+        currentColumnGroup.add(index);
+      }
+      // if the column is not columnar then a new column group has started,
+      // so we need to create a new list for that group and add the
+      // ordinal
+      else if (!dimensions.get(index).isColumnar()) {
+        currentColumnGroup = new ArrayList<Integer>();
+        columnGroupAndOrdinalMapping.put(dimensions.get(index).columnGroupId(), currentColumnGroup);
+        currentColumnGroup.add(index);
+      }
+      // update the column group id every time; this is required to group
+      // the columns of the same column group
+      prvColumnGroupId = dimensions.get(index).columnGroupId();
+      index++;
+    }
+    // Initializing the map
+    this.columnGroupAndItsKeygenartor =
+        new HashMap<Integer, KeyGenerator>(columnGroupAndOrdinalMapping.size());
+    this.columnGroupOrdinalToMdkeymapping = new HashMap<>(columnGroupAndOrdinalMapping.size());
+    int[] columnGroupCardinality = null;
+    index = 0;
+    Iterator<Entry<Integer, List<Integer>>> iterator =
+        columnGroupAndOrdinalMapping.entrySet().iterator();
+    while (iterator.hasNext()) {
+      Entry<Integer, List<Integer>> next = iterator.next();
+      List<Integer> currentGroupOrdinal = next.getValue();
+      Map<Integer, Integer> colGrpOrdinalMdkeyMapping = new HashMap<>(currentGroupOrdinal.size());
+      // create the cardinality array
+      columnGroupCardinality = new int[currentGroupOrdinal.size()];
+      for (int i = 0; i < columnGroupCardinality.length; i++) {
+        // fill the cardinality
+        columnGroupCardinality[i] = cardinality[currentGroupOrdinal.get(i)];
+        colGrpOrdinalMdkeyMapping.put(currentGroupOrdinal.get(i), i);
+      }
+      this.columnGroupAndItsKeygenartor.put(next.getKey(), new MultiDimKeyVarLengthGenerator(
+          CarbonUtil.getDimensionBitLength(columnGroupCardinality,
+              new int[] { columnGroupCardinality.length })));
+      this.columnGroupOrdinalToMdkeymapping.put(next.getKey(), colGrpOrdinalMdkeyMapping);
+    }
+  }
+
+  /**
+   * Below method is to get the value size of each dimension column. As this
+   * method will be used only once, we can merge the normal and the complex
+   * dimension arrays on the fly. Complex dimensions are stored at the end,
+   * so first copy the normal dimension sizes and then the complex dimension
+   * sizes. Storing the merged array as a class variable would unnecessarily
+   * waste space
+   *
+   * @return each dimension value size
+   */
+  public int[] getDimensionColumnsValueSize() {
+    int[] dimensionValueSize =
+        new int[eachDimColumnValueSize.length + eachComplexDimColumnValueSize.length];
+    System
+        .arraycopy(eachDimColumnValueSize, 0, dimensionValueSize, 0, eachDimColumnValueSize.length);
+    System.arraycopy(eachComplexDimColumnValueSize, 0, dimensionValueSize,
+        eachDimColumnValueSize.length, eachComplexDimColumnValueSize.length);
+    return dimensionValueSize;
+  }
+
+  /**
+   * @return the dimensionKeyGenerator
+   */
+  public KeyGenerator getDimensionKeyGenerator() {
+    return dimensionKeyGenerator;
+  }
+
+  /**
+   * @return the dimensions
+   */
+  public List<CarbonDimension> getDimensions() {
+    return dimensions;
+  }
+
+  /**
+   * @return the complexDimensions
+   */
+  public List<CarbonDimension> getComplexDimensions() {
+    return complexDimensions;
+  }
+
+  /**
+   * @return the measures
+   */
+  public List<CarbonMeasure> getMeasures() {
+    return measures;
+  }
+
+  /**
+   * @return the dimColumnsCardinality
+   */
+  public int[] getDimColumnsCardinality() {
+    return dimColumnsCardinality;
+  }
+
+  /**
+   * @return the complexDimColumnCardinality
+   */
+  public int[] getComplexDimColumnCardinality() {
+    return complexDimColumnCardinality;
+  }
+
+  /**
+   * @return the dimensionOrdinalToBlockMapping
+   */
+  public Map<Integer, Integer> getDimensionOrdinalToBlockMapping() {
+    return dimensionOrdinalToBlockMapping;
+  }
+
+  /**
+   * @return the measuresOrdinalToBlockMapping
+   */
+  public Map<Integer, Integer> getMeasuresOrdinalToBlockMapping() {
+    return measuresOrdinalToBlockMapping;
+  }
+
+  /**
+   * @return the eachDimColumnValueSize
+   */
+  public int[] getEachDimColumnValueSize() {
+    return eachDimColumnValueSize;
+  }
+
+  /**
+   * @return the eachComplexDimColumnValueSize
+   */
+  public int[] getEachComplexDimColumnValueSize() {
+    return eachComplexDimColumnValueSize;
+  }
+
+  /**
+   * @return the fixedLengthKeySplitter
+   */
+  public ColumnarSplitter getFixedLengthKeySplitter() {
+    return fixedLengthKeySplitter;
+  }
+
+  /**
+   * @return the columnGroupAndItsKeygenartor
+   */
+  public Map<Integer, KeyGenerator> getColumnGroupAndItsKeygenartor() {
+    return columnGroupAndItsKeygenartor;
+  }
+
+  /**
+   * @return the numberOfNoDictionaryDimension
+   */
+  public int getNumberOfNoDictionaryDimension() {
+    return numberOfNoDictionaryDimension;
+  }
+
+  /**
+   * @return the column groups as a 2-d array of dimension ordinals
+   */
+  public int[][] getColumnGroups() {
+    return colGroupModel.getColumnGroup();
+  }
+
+  /**
+   * @return colGroupModel
+   */
+  public ColumnGroupModel getColumnGroupModel() {
+    return this.colGroupModel;
+  }
+
+  /**
+   * get mdkey ordinal for given dimension ordinal of given column group
+   *
+   * @param colGrpId
+   * @param ordinal
+   * @return mdkey ordinal
+   */
+  public int getColumnGroupMdKeyOrdinal(int colGrpId, int ordinal) {
+    return columnGroupOrdinalToMdkeymapping.get(colGrpId).get(ordinal);
+  }
+
+  /**
+   * It returns the number of columns available in the given column group
+   *
+   * @param colGrpId
+   * @return number of columns in the given column group
+   */
+  public int getNoOfColumnsInColumnGroup(int colGrpId) {
+    return columnGroupOrdinalToMdkeymapping.get(colGrpId).size();
+  }
+
+  /**
+   * @param blockIndex
+   * @return all dimension ordinals present in the given block index
+   */
+  public Set<Integer> getDimensionOrdinalForBlock(int blockIndex) {
+    return blockTodimensionOrdinalMapping.get(blockIndex);
+  }
+
+}

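The fixed-length key sizing in fillKeyGeneratorDetails comes down to simple bit arithmetic: each dictionary dimension needs roughly ceil(log2(cardinality)) bits, and the packed bits round up to whole bytes. The sketch below shows only that arithmetic under that assumption; the actual CarbonUtil.getDimensionBitLength may round differently (for example at column group boundaries):

public class MdKeySizeSketch {

  // bits needed to represent values in [0, cardinality), assuming plain
  // binary packing; a hypothetical simplification of getDimensionBitLength
  static int bitsFor(int cardinality) {
    return Math.max(1, 32 - Integer.numberOfLeadingZeros(cardinality - 1));
  }

  public static void main(String[] args) {
    int[] cardinalities = { 100, 50, 1000 };  // hypothetical dictionary sizes
    int totalBits = 0;
    for (int c : cardinalities) {
      totalBits += bitsFor(c);                // 7 + 6 + 10 = 23 bits
    }
    // 23 bits round up to a 3 byte fixed-length mdkey per row
    System.out.println((totalBits + 7) / 8);
  }
}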
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/SegmentTaskIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/SegmentTaskIndex.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/SegmentTaskIndex.java
new file mode 100644
index 0000000..ce7a63a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/SegmentTaskIndex.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.block;
+
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.BTreeBuilderInfo;
+import org.apache.carbondata.core.carbon.datastore.BtreeBuilder;
+import org.apache.carbondata.core.carbon.datastore.impl.btree.BlockBTreeBuilder;
+import org.apache.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
+
+/**
+ * Class which is responsible for loading the b+ tree blocks. This class will
+ * persist all the details of a table segment
+ */
+public class SegmentTaskIndex extends AbstractIndex {
+
+  /**
+   * Below method will store the blocks in a b+ tree built from the data
+   * file footers
+   *
+   * @param footerList data file footer list
+   */
+  public void buildIndex(List<DataFileFooter> footerList) {
+    // create the segment metadata details; these will be useful in query
+    // handling. All the data file metadata share common segment properties,
+    // so we can use the first one to create the segment properties
+    segmentProperties = new SegmentProperties(footerList.get(0).getColumnInTable(),
+        footerList.get(0).getSegmentInfo().getColumnCardinality());
+    // create a segment builder info; in case of a segment we do not need any
+    // file path or per-column value sizes, as the btree will be built from
+    // the min/max values and the start key
+    BTreeBuilderInfo btreeBuilderInfo = new BTreeBuilderInfo(footerList, null);
+    BtreeBuilder blocksBuilder = new BlockBTreeBuilder();
+    // load the metadata
+    blocksBuilder.build(btreeBuilderInfo);
+    dataRefNode = blocksBuilder.get();
+    for (DataFileFooter footer : footerList) {
+      totalNumberOfRows += footer.getNumberOfRows();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/TableBlockInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/TableBlockInfo.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/TableBlockInfo.java
new file mode 100644
index 0000000..06166fd
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/TableBlockInfo.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.block;
+
+import java.io.Serializable;
+import java.util.Arrays;
+
+import org.apache.carbondata.core.carbon.path.CarbonTablePath;
+import org.apache.carbondata.core.carbon.path.CarbonTablePath.DataFileUtil;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+
+/**
+ * class will be used to pass the block details; the details will be passed
+ * from the driver to all the executors to load the b+ tree
+ */
+public class TableBlockInfo extends Distributable
+    implements Serializable, Comparable<Distributable> {
+
+  /**
+   * serialization id
+   */
+  private static final long serialVersionUID = -6502868998599821172L;
+
+  /**
+   * full qualified file path of the block
+   */
+  private String filePath;
+
+  /**
+   * block offset in the file
+   */
+  private long blockOffset;
+
+  /**
+   * length of the block
+   */
+  private long blockLength;
+
+  /**
+   * id of the segment; this will be used to sort the blocks
+   */
+  private String segmentId;
+
+  private String[] locations;
+
+
+  public TableBlockInfo(String filePath, long blockOffset, String segmentId, String[] locations,
+      long blockLength) {
+    this.filePath = FileFactory.getUpdatedFilePath(filePath);
+    this.blockOffset = blockOffset;
+    this.segmentId = segmentId;
+    this.locations = locations;
+    this.blockLength = blockLength;
+  }
+
+  /**
+   * @return the filePath
+   */
+  public String getFilePath() {
+    return filePath;
+  }
+
+  /**
+   * @return the blockOffset
+   */
+  public long getBlockOffset() {
+    return blockOffset;
+  }
+
+
+  /**
+   * @return the segmentId
+   */
+  public String getSegmentId() {
+    return segmentId;
+  }
+
+  /**
+   * @return the blockLength
+   */
+  public long getBlockLength() {
+    return blockLength;
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see java.lang.Object#equals(java.lang.Object)
+   */
+  @Override public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (!(obj instanceof TableBlockInfo)) {
+      return false;
+    }
+    TableBlockInfo other = (TableBlockInfo) obj;
+    if (!segmentId.equals(other.segmentId)) {
+      return false;
+    }
+    if (blockOffset != other.blockOffset) {
+      return false;
+    }
+    if (blockLength != other.blockLength) {
+      return false;
+    }
+
+    if (filePath == null) {
+      if (other.filePath != null) {
+        return false;
+      }
+    } else if (!filePath.equals(other.filePath)) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Below method will be used to compare two TableBlockInfo objects; this
+   * will be used for sorting. The comparison logic is:
+   * 1. compare the segment ids; if they are the same,
+   * 2. compare the task ids; if they are the same,
+   * 3. compare the offsets of the blocks
+   */
+  @Override public int compareTo(Distributable other) {
+
+    int compareResult = 0;
+    // get the segment ids and convert them to double
+    // for a numeric comparison
+
+    double seg1 = Double.parseDouble(segmentId);
+    double seg2 = Double.parseDouble(((TableBlockInfo) other).segmentId);
+    if (seg1 - seg2 < 0) {
+      return -1;
+    }
+    if (seg1 - seg2 > 0) {
+      return 1;
+    }
+
+    // compare the task ids of the two files; if both
+    // task ids are the same then we need to compare
+    // the offsets of the files
+    if (CarbonTablePath.isCarbonDataFile(filePath)) {
+      int firstTaskId = Integer.parseInt(DataFileUtil.getTaskNo(filePath));
+      int otherTaskId = Integer.parseInt(DataFileUtil.getTaskNo(((TableBlockInfo) other).filePath));
+      if (firstTaskId != otherTaskId) {
+        return firstTaskId - otherTaskId;
+      }
+      // compare the part no of both block info
+      int firstPartNo = Integer.parseInt(DataFileUtil.getPartNo(filePath));
+      int secondPartNo =
+          Integer.parseInt(DataFileUtil.getPartNo(((TableBlockInfo) other).filePath));
+      compareResult = firstPartNo - secondPartNo;
+    } else {
+      compareResult = filePath.compareTo(((TableBlockInfo) other).getFilePath());
+    }
+    // if the compare result is not 0 then return it
+    if (compareResult != 0) {
+      return compareResult;
+    }
+    // if the part no is also same then compare the offset and length
+    // of the block
+    if (blockOffset + blockLength
+        < ((TableBlockInfo) other).blockOffset + ((TableBlockInfo) other).blockLength) {
+      return -1;
+    } else if (blockOffset + blockLength
+        > ((TableBlockInfo) other).blockOffset + ((TableBlockInfo) other).blockLength) {
+      return 1;
+    }
+    return 0;
+  }
+
+  @Override public int hashCode() {
+    int result = filePath.hashCode();
+    result = 31 * result + (int) (blockOffset ^ (blockOffset >>> 32));
+    result = 31 * result + (int) (blockLength ^ (blockLength >>> 32));
+    result = 31 * result + segmentId.hashCode();
+    result = 31 * result + Arrays.hashCode(locations);
+    return result;
+  }
+
+  @Override public String[] getLocations() {
+    return locations;
+  }
+
+}

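Since TableBlockInfo is Comparable, a plain Collections.sort yields the segment/task/part/offset ordering described above. A hedged sketch follows; the paths, hosts and sizes are made-up values for illustration only:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.carbondata.core.carbon.datastore.block.TableBlockInfo;

public class BlockSortExample {

  public static void main(String[] args) {
    List<TableBlockInfo> blocks = new ArrayList<TableBlockInfo>();
    blocks.add(new TableBlockInfo("/store/t1/blockB", 0L, "10",
        new String[] { "host1" }, 1024L));
    blocks.add(new TableBlockInfo("/store/t1/blockA", 0L, "2",
        new String[] { "host2" }, 1024L));
    // numeric segment comparison puts segment "2" before segment "10";
    // non carbondata paths fall back to plain path comparison
    Collections.sort(blocks);
  }
}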

[02/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/NoDictionaryTypeVisitor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/NoDictionaryTypeVisitor.java b/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/NoDictionaryTypeVisitor.java
deleted file mode 100644
index f8b76e0..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/NoDictionaryTypeVisitor.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.resolver.resolverinfo.visitor;
-
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.DimColumnFilterInfo;
-import org.carbondata.scan.filter.FilterUtil;
-import org.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-
-public class NoDictionaryTypeVisitor implements ResolvedFilterInfoVisitorIntf {
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(NoDictionaryTypeVisitor.class.getName());
-
-  /**
-   * Visitor Method will update the filter related details in visitableObj, For no dictionary
-   * type columns the filter members will resolved directly, no need to look up in dictionary
-   * since it will not be part of dictionary, directly the actual data can be converted as
-   * byte[] and can be set. this type of encoding is effective when the particular column
-   * is having very high cardinality.
-   *
-   * @param visitableObj
-   * @param metadata
-   * @throws FilterUnsupportedException,if exception occurs while evaluating
-   * filter models.
-   */
-  public void populateFilterResolvedInfo(DimColumnResolvedFilterInfo visitableObj,
-      FilterResolverMetadata metadata) throws FilterUnsupportedException {
-    DimColumnFilterInfo resolvedFilterObject = null;
-    List<String> evaluateResultListFinal;
-    try {
-      evaluateResultListFinal = metadata.getExpression().evaluate(null).getListAsString();
-      // Adding default  null member inorder to not display the same while
-      // displaying the report as per hive compatibility.
-      if (!metadata.isIncludeFilter() && !evaluateResultListFinal
-          .contains(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
-        evaluateResultListFinal.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL);
-      }
-    } catch (FilterIllegalMemberException e) {
-      throw new FilterUnsupportedException(e);
-    }
-    resolvedFilterObject = FilterUtil
-        .getNoDictionaryValKeyMemberForFilter(metadata.getTableIdentifier(),
-            metadata.getColumnExpression(), evaluateResultListFinal, metadata.isIncludeFilter());
-    visitableObj.setFilterValues(resolvedFilterObject);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/ResolvedFilterInfoVisitorIntf.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/ResolvedFilterInfoVisitorIntf.java b/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/ResolvedFilterInfoVisitorIntf.java
deleted file mode 100644
index 1cd9197..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/ResolvedFilterInfoVisitorIntf.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.resolver.resolverinfo.visitor;
-
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-
-public interface ResolvedFilterInfoVisitorIntf {
-
-  /**
-   * Visitor pattern is been used in this scenario inorder to populate the
-   * dimColResolvedFilterInfo visitable object with filter member values based
-   * on the visitor type, currently there 3 types of visitors custom,direct
-   * and no dictionary, all types of visitor populate the visitable instance
-   * as per its buisness logic which is different for all the visitors.
-   *
-   * @param visitableObj
-   * @param metadata
-   * @throws QueryExecutionException
-   */
-  void populateFilterResolvedInfo(DimColumnResolvedFilterInfo visitableObj,
-      FilterResolverMetadata metadata) throws FilterUnsupportedException;
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/model/CarbonQueryPlan.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/model/CarbonQueryPlan.java b/core/src/main/java/org/carbondata/scan/model/CarbonQueryPlan.java
deleted file mode 100644
index d914b06..0000000
--- a/core/src/main/java/org/carbondata/scan/model/CarbonQueryPlan.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/**
- *
- */
-package org.carbondata.scan.model;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.scan.expression.Expression;
-
-/**
- * This class contains all the logical information about the query like dimensions, measures,
- * sort order, topN etc.
- */
-public class CarbonQueryPlan implements Serializable {
-  /**
-   *
-   */
-  private static final long serialVersionUID = -9036044826928017164L;
-
-  /**
-   * Database name
-   */
-  private String databaseName;
-
-  /**
-   * Table name
-   */
-  private String tableName;
-
-  /**
-   * List of dimensions.
-   * Ex : select employee_name, department_name, sum(salary) from employee; here employee_name
-   * and department_name are dimensions.
-   * If no dimensions are selected in the query, this list remains empty.
-   */
-  private List<QueryDimension> dimensions =
-      new ArrayList<QueryDimension>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-
-  /**
-   * List of measures.
-   * Ex : select employee_name, department_name, sum(salary) from employee; here sum(salary)
-   * would be a measure.
-   * If no measures are selected in the query, this list remains empty.
-   */
-  private List<QueryMeasure> measures =
-      new ArrayList<QueryMeasure>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-
-  /**
-   * Limit
-   */
-  private int limit = -1;
-
-  /**
-   * If it is a detail query, no aggregation is needed in the backend
-   */
-  private boolean detailQuery;
-
-  /**
-   * expression
-   */
-  private Expression expression;
-
-  /**
-   * queryId
-   */
-  private String queryId;
-
-  /**
-   * outLocationPath
-   */
-  private String outLocationPath;
-
-  /**
-   * isCountStarQuery
-   */
-  private boolean isCountStartQuery;
-
-  private List<QueryDimension> sortedDimensions;
-
-  /**
-   * If it is a raw detail query, no aggregation is needed in the backend,
-   * and dictionary data is returned without decoding.
-   */
-  private boolean rawDetailQuery;
-
-  /**
-   * Constructor created with table name.
-   *
-   * @param tableName
-   */
-  public CarbonQueryPlan(String tableName) {
-    this.tableName = tableName;
-  }
-
-  /**
-   * Constructor created with database name and table name.
-   *
-   * @param databaseName
-   * @param tableName
-   */
-  public CarbonQueryPlan(String databaseName, String tableName) {
-    this.tableName = tableName;
-    this.databaseName = databaseName;
-  }
-
-  /**
-   * @return the dimensions
-   */
-  public List<QueryDimension> getDimensions() {
-    return dimensions;
-  }
-
-  public void addDimension(QueryDimension dimension) {
-    this.dimensions.add(dimension);
-  }
-
-  /**
-   * @return the measures
-   */
-  public List<QueryMeasure> getMeasures() {
-    return measures;
-  }
-
-  public void addMeasure(QueryMeasure measure) {
-    this.measures.add(measure);
-  }
-
-  public Expression getFilterExpression() {
-    return expression;
-  }
-
-  public void setFilterExpression(Expression expression) {
-    this.expression = expression;
-  }
-
-  /**
-   * @return the databaseName
-   */
-  public String getDatabaseName() {
-    return databaseName;
-  }
-
-  /**
-   * @return the tableName
-   */
-  public String getTableName() {
-    return tableName;
-  }
-
-  /**
-   * @return the limit
-   */
-  public int getLimit() {
-    return limit;
-  }
-
-  /**
-   * @param limit the limit to set
-   */
-  public void setLimit(int limit) {
-    this.limit = limit;
-  }
-
-  /**
-   * @return the detailQuery
-   */
-  public boolean isDetailQuery() {
-    return detailQuery;
-  }
-
-  /**
-   * @param detailQuery the detailQuery to set
-   */
-  public void setDetailQuery(boolean detailQuery) {
-    this.detailQuery = detailQuery;
-  }
-
-  public String getQueryId() {
-    return queryId;
-  }
-
-  public void setQueryId(String queryId) {
-    this.queryId = queryId;
-  }
-
-  public String getOutLocationPath() {
-    return outLocationPath;
-  }
-
-  public void setOutLocationPath(String outLocationPath) {
-    this.outLocationPath = outLocationPath;
-  }
-
-  public boolean isCountStarQuery() {
-    return isCountStartQuery;
-  }
-
-  public void setCountStartQuery(boolean isCountStartQuery) {
-    this.isCountStartQuery = isCountStartQuery;
-  }
-
-  public List<QueryDimension> getSortedDimemsions() {
-    return sortedDimensions;
-  }
-
-  public void setSortedDimemsions(List<QueryDimension> dims) {
-    this.sortedDimensions = dims;
-  }
-
-  public boolean isRawDetailQuery() {
-    return rawDetailQuery;
-  }
-
-  public void setRawDetailQuery(boolean rawDetailQuery) {
-    this.rawDetailQuery = rawDetailQuery;
-  }
-}
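
As a usage illustration, the plan for the Javadoc example above
(select employee_name, department_name, sum(salary) from employee) could be
assembled as follows; this is a sketch, and the database name "default" and
the aggregate function literal "sum" are assumptions:

    CarbonQueryPlan plan = new CarbonQueryPlan("default", "employee");

    QueryDimension empName = new QueryDimension("employee_name");
    empName.setQueryOrder(0);
    plan.addDimension(empName);

    QueryDimension deptName = new QueryDimension("department_name");
    deptName.setQueryOrder(1);
    plan.addDimension(deptName);

    QueryMeasure salary = new QueryMeasure("salary");
    salary.setQueryOrder(2);
    salary.setAggregateFunction("sum");  // assumed literal; the default is CarbonCommonConstants.DUMMY
    plan.addMeasure(salary);

    plan.setLimit(1000);        // -1 (the default) means no limit
    plan.setDetailQuery(false);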

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/model/QueryColumn.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/model/QueryColumn.java b/core/src/main/java/org/carbondata/scan/model/QueryColumn.java
deleted file mode 100644
index 85ff41d..0000000
--- a/core/src/main/java/org/carbondata/scan/model/QueryColumn.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.model;
-
-import java.io.Serializable;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-/**
- * query column, which holds the information about a column in the query
- */
-public class QueryColumn implements Serializable {
-
-  /**
-   * serialVersionUID
-   */
-  private static final long serialVersionUID = -4222306600480181084L;
-
-  /**
-   * name of the column
-   */
-  protected String columnName;
-
-  /**
-   * sort order in which the column output will be sorted; default is NONE
-   */
-  private SortOrderType sortOrder = SortOrderType.NONE;
-
-  /**
-   * query order in which the result of the query will be sent
-   */
-  private int queryOrder;
-
-  /**
-   * aggregation function applied on the column
-   */
-  private String aggregationFunction = CarbonCommonConstants.DUMMY;
-
-  public QueryColumn(String columnName) {
-    this.columnName = columnName;
-  }
-
-  /**
-   * @return the sortOrder
-   */
-  public SortOrderType getSortOrder() {
-    return sortOrder;
-  }
-
-  /**
-   * @param sortOrder the sortOrder to set
-   */
-  public void setSortOrder(SortOrderType sortOrder) {
-    this.sortOrder = sortOrder;
-  }
-
-  /**
-   * @return the columnName
-   */
-  public String getColumnName() {
-    return columnName;
-  }
-
-  /**
-   * @return the queryOrder
-   */
-  public int getQueryOrder() {
-    return queryOrder;
-  }
-
-  /**
-   * @param queryOrder the queryOrder to set
-   */
-  public void setQueryOrder(int queryOrder) {
-    this.queryOrder = queryOrder;
-  }
-
-  /**
-   * @return the aggregationFunction
-   */
-  public String getAggregateFunction() {
-    return aggregationFunction;
-  }
-
-  /**
-   * @param aggregationFunction the aggregationFunction to set
-   */
-  public void setAggregateFunction(String aggregationFunction) {
-    this.aggregationFunction = aggregationFunction;
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/model/QueryDimension.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/model/QueryDimension.java b/core/src/main/java/org/carbondata/scan/model/QueryDimension.java
deleted file mode 100644
index dc07173..0000000
--- a/core/src/main/java/org/carbondata/scan/model/QueryDimension.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.model;
-
-import java.io.Serializable;
-
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-
-/**
- * query plan dimension which holds the information about the dimension in the query plan;
- * this is done to avoid heavy object serialization
- */
-public class QueryDimension extends QueryColumn implements Serializable {
-
-  /**
-   * serialVersionUID
-   */
-  private static final long serialVersionUID = -8492704093776645651L;
-  /**
-   * actual dimension column
-   */
-  private transient CarbonDimension dimension;
-
-  public QueryDimension(String columnName) {
-    super(columnName);
-  }
-
-  /**
-   * @return the dimension
-   */
-  public CarbonDimension getDimension() {
-    return dimension;
-  }
-
-  /**
-   * @param dimension the dimension to set
-   */
-  public void setDimension(CarbonDimension dimension) {
-    this.dimension = dimension;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/model/QueryMeasure.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/model/QueryMeasure.java b/core/src/main/java/org/carbondata/scan/model/QueryMeasure.java
deleted file mode 100644
index 4035e61..0000000
--- a/core/src/main/java/org/carbondata/scan/model/QueryMeasure.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.model;
-
-import java.io.Serializable;
-
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
-
-/**
- * query plan measure, this class holds the information
- * about a measure present in the query; this is done to avoid
- * serialization of the heavy object
- */
-public class QueryMeasure extends QueryColumn implements Serializable {
-
-  /**
-   * serialVersionUID
-   */
-  private static final long serialVersionUID = 1035512411375495414L;
-
-  /**
-   * actual carbon measure object
-   */
-  private transient CarbonMeasure measure;
-
-  public QueryMeasure(String columnName) {
-    super(columnName);
-  }
-
-  /**
-   * @return the measure
-   */
-  public CarbonMeasure getMeasure() {
-    return measure;
-  }
-
-  /**
-   * @param measure the measure to set
-   */
-  public void setMeasure(CarbonMeasure measure) {
-    this.measure = measure;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/model/QueryModel.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/model/QueryModel.java b/core/src/main/java/org/carbondata/scan/model/QueryModel.java
deleted file mode 100644
index e299e7b..0000000
--- a/core/src/main/java/org/carbondata/scan/model/QueryModel.java
+++ /dev/null
@@ -1,507 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.model;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.carbondata.core.cache.dictionary.Dictionary;
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.TableBlockInfo;
-import org.carbondata.core.carbon.metadata.schema.table.CarbonTable;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonColumn;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
-import org.carbondata.core.carbon.querystatistics.QueryStatisticsRecorder;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.scan.expression.ColumnExpression;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.UnknownExpression;
-import org.carbondata.scan.expression.conditional.ConditionalExpression;
-import org.carbondata.scan.filter.resolver.FilterResolverIntf;
-
-/**
- * Query model which holds all the details
- * about the query. This will be sent from driver to executor,
- * and will be referred to while executing the query.
- */
-public class QueryModel implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = -4674677234007089052L;
-  /**
-   * this will hold the dictionary for each dictionary-encoded
-   * dimension column, keyed by column identifier
-   */
-  public transient Map<String, Dictionary> columnToDictionaryMapping;
-  /**
-   * Number of records to keep in memory.
-   */
-  public int inMemoryRecordSize;
-  /**
-   * list of dimensions selected in the query
-   */
-  private List<QueryDimension> queryDimension;
-  /**
-   * list of dimensions on which sorting is applied
-   */
-  private List<QueryDimension> sortDimension;
-  /**
-   * list of measures selected in the query
-   */
-  private List<QueryMeasure> queryMeasures;
-  /**
-   * query id
-   */
-  private String queryId;
-  /**
-   * to check if it is an aggregate table
-   */
-  private boolean isAggTable;
-  /**
-   * filter tree
-   */
-  private FilterResolverIntf filterExpressionResolverTree;
-  /**
-   * in case of a limit query we need to know how many
-   * records will be passed from the executor
-   */
-  private int limit;
-
-  /**
-   * to check if it is a count(*) query, as processing will be different
-   */
-  private boolean isCountStarQuery;
-  /**
-   * to check whether aggregation is required during query execution
-   */
-  private boolean detailQuery;
-  /**
-   * information about the table blocks on which the query will be executed
-   */
-  private List<TableBlockInfo> tableBlockInfos;
-  /**
-   * order in which each sort dimension will be sorted
-   */
-  private byte[] sortOrder;
-  /**
-   * absolute table identifier
-   */
-  private AbsoluteTableIdentifier absoluteTableIdentifier;
-  /**
-   * in case of a detail query with sort we spill to disk;
-   * this location will be used to write the temp files
-   */
-  private String queryTempLocation;
-  /**
-   * To let query engines like spark and hive handle most of the computation,
-   * carbon should give them raw detailed records.
-   */
-  private boolean forcedDetailRawQuery;
-  /**
-   * partition column list
-   */
-  private List<String> paritionColumns;
-  /**
-   * table on which the query will be executed
-   * TODO need to remove this and pass only the path,
-   * and carbon metadata will load the table from the metadata file
-   */
-  private CarbonTable table;
-
-  private QueryStatisticsRecorder statisticsRecorder;
-
-  public QueryModel() {
-    tableBlockInfos = new ArrayList<TableBlockInfo>();
-    queryDimension = new ArrayList<QueryDimension>();
-    queryMeasures = new ArrayList<QueryMeasure>();
-    sortDimension = new ArrayList<QueryDimension>();
-    sortOrder = new byte[0];
-    paritionColumns = new ArrayList<String>();
-  }
-
-  public static QueryModel createModel(AbsoluteTableIdentifier absoluteTableIdentifier,
-      CarbonQueryPlan queryPlan, CarbonTable carbonTable) {
-    QueryModel queryModel = new QueryModel();
-    String factTableName = carbonTable.getFactTableName();
-    queryModel.setAbsoluteTableIdentifier(absoluteTableIdentifier);
-
-    fillQueryModel(queryPlan, carbonTable, queryModel, factTableName);
-
-    queryModel.setLimit(queryPlan.getLimit());
-    queryModel.setDetailQuery(queryPlan.isDetailQuery());
-    queryModel.setForcedDetailRawQuery(queryPlan.isRawDetailQuery());
-    queryModel.setQueryId(queryPlan.getQueryId());
-    queryModel.setQueryTempLocation(queryPlan.getOutLocationPath());
-    return queryModel;
-  }
-
-  private static void fillQueryModel(CarbonQueryPlan queryPlan, CarbonTable carbonTable,
-      QueryModel queryModel, String factTableName) {
-    queryModel.setAbsoluteTableIdentifier(carbonTable.getAbsoluteTableIdentifier());
-    queryModel.setQueryDimension(queryPlan.getDimensions());
-    fillSortInfoInModel(queryModel, queryPlan.getSortedDimemsions());
-    queryModel.setQueryMeasures(queryPlan.getMeasures());
-    if (null != queryPlan.getFilterExpression()) {
-      processFilterExpression(queryPlan.getFilterExpression(),
-          carbonTable.getDimensionByTableName(factTableName),
-          carbonTable.getMeasureByTableName(factTableName));
-    }
-    queryModel.setCountStarQuery(queryPlan.isCountStarQuery());
-    //TODO need to remove this code, and executor will load the table
-    // from file metadata
-    queryModel.setTable(carbonTable);
-  }
-
-  private static void fillSortInfoInModel(QueryModel executorModel,
-      List<QueryDimension> sortedDims) {
-    if (null != sortedDims) {
-      byte[] sortOrderByteArray = new byte[sortedDims.size()];
-      int i = 0;
-      for (QueryColumn mdim : sortedDims) {
-        sortOrderByteArray[i++] = (byte) mdim.getSortOrder().ordinal();
-      }
-      executorModel.setSortOrder(sortOrderByteArray);
-      executorModel.setSortDimension(sortedDims);
-    } else {
-      executorModel.setSortOrder(new byte[0]);
-      executorModel.setSortDimension(new ArrayList<QueryDimension>(0));
-    }
-
-  }
-
-  public static void processFilterExpression(
-      Expression filterExpression, List<CarbonDimension> dimensions, List<CarbonMeasure> measures) {
-    if (null != filterExpression) {
-      if (null != filterExpression.getChildren() && filterExpression.getChildren().size() == 0) {
-        if (filterExpression instanceof ConditionalExpression) {
-          List<ColumnExpression> listOfCol =
-              ((ConditionalExpression) filterExpression).getColumnList();
-          for (ColumnExpression expression : listOfCol) {
-            setDimAndMsrColumnNode(dimensions, measures, expression);
-          }
-
-        }
-      }
-      for (Expression expression : filterExpression.getChildren()) {
-
-        if (expression instanceof ColumnExpression) {
-          setDimAndMsrColumnNode(dimensions, measures, (ColumnExpression) expression);
-        } else if (expression instanceof UnknownExpression) {
-          UnknownExpression exp = ((UnknownExpression) expression);
-          List<ColumnExpression> listOfColExpression = exp.getAllColumnList();
-          for (ColumnExpression col : listOfColExpression) {
-            setDimAndMsrColumnNode(dimensions, measures, col);
-          }
-        } else {
-          processFilterExpression(expression, dimensions, measures);
-        }
-      }
-    }
-
-  }
-
-  private static CarbonMeasure getCarbonMetadataMeasure(String name, List<CarbonMeasure> measures) {
-    for (CarbonMeasure measure : measures) {
-      if (measure.getColName().equalsIgnoreCase(name)) {
-        return measure;
-      }
-    }
-    return null;
-  }
-
-  private static void setDimAndMsrColumnNode(List<CarbonDimension> dimensions,
-      List<CarbonMeasure> measures, ColumnExpression col) {
-    String columnName = col.getColumnName();
-    CarbonDimension dim = CarbonUtil.findDimension(dimensions, columnName);
-    if (null != dim) {
-      col.setCarbonColumn(dim);
-      col.setDimension(dim);
-      col.setDimension(true);
-    } else {
-      CarbonMeasure msr = getCarbonMetadataMeasure(columnName, measures);
-      col.setCarbonColumn(msr);
-      col.setDimension(false);
-    }
-  }
-
-  /**
-   * It gets the projection columns
-   */
-  public CarbonColumn[] getProjectionColumns() {
-    CarbonColumn[] carbonColumns =
-        new CarbonColumn[getQueryDimension().size() + getQueryMeasures().size()];
-    for (QueryDimension dimension : getQueryDimension()) {
-      carbonColumns[dimension.getQueryOrder()] = dimension.getDimension();
-    }
-    for (QueryMeasure msr : getQueryMeasures()) {
-      carbonColumns[msr.getQueryOrder()] = msr.getMeasure();
-    }
-    return carbonColumns;
-  }
-
-  /**
-   * @return the queryDimension
-   */
-  public List<QueryDimension> getQueryDimension() {
-    return queryDimension;
-  }
-
-  /**
-   * @param queryDimension the queryDimension to set
-   */
-  public void setQueryDimension(List<QueryDimension> queryDimension) {
-    this.queryDimension = queryDimension;
-  }
-
-  /**
-   * @return the queryMeasures
-   */
-  public List<QueryMeasure> getQueryMeasures() {
-    return queryMeasures;
-  }
-
-  /**
-   * @param queryMeasures the queryMeasures to set
-   */
-  public void setQueryMeasures(List<QueryMeasure> queryMeasures) {
-    this.queryMeasures = queryMeasures;
-  }
-
-  /**
-   * @return the queryId
-   */
-  public String getQueryId() {
-    return queryId;
-  }
-
-  /**
-   * @param queryId the queryId to set
-   */
-  public void setQueryId(String queryId) {
-    this.queryId = queryId;
-  }
-
-  /**
-   * @return the isAggTable
-   */
-  public boolean isAggTable() {
-    return isAggTable;
-  }
-
-  /**
-   * @param isAggTable the isAggTable to set
-   */
-  public void setAggTable(boolean isAggTable) {
-    this.isAggTable = isAggTable;
-  }
-
-  /**
-   * @return the limit
-   */
-  public int getLimit() {
-    return limit;
-  }
-
-  /**
-   * @param limit the limit to set
-   */
-  public void setLimit(int limit) {
-    this.limit = limit;
-  }
-
-  /**
-   * @return the isCountStarQuery
-   */
-  public boolean isCountStarQuery() {
-    return isCountStarQuery;
-  }
-
-  /**
-   * @param isCountStarQuery the isCountStarQuery to set
-   */
-  public void setCountStarQuery(boolean isCountStarQuery) {
-    this.isCountStarQuery = isCountStarQuery;
-  }
-
-  /**
-   * @return the isdetailQuery
-   */
-  public boolean isDetailQuery() {
-    return detailQuery;
-  }
-
-  public void setDetailQuery(boolean detailQuery) {
-    this.detailQuery = detailQuery;
-  }
-
-  /**
-   * @return the tableBlockInfos
-   */
-  public List<TableBlockInfo> getTableBlockInfos() {
-    return tableBlockInfos;
-  }
-
-  /**
-   * @param tableBlockInfos the tableBlockInfos to set
-   */
-  public void setTableBlockInfos(List<TableBlockInfo> tableBlockInfos) {
-    this.tableBlockInfos = tableBlockInfos;
-  }
-
-  /**
-   * @return the queryTempLocation
-   */
-  public String getQueryTempLocation() {
-    return queryTempLocation;
-  }
-
-  /**
-   * @param queryTempLocation the queryTempLocation to set
-   */
-  public void setQueryTempLocation(String queryTempLocation) {
-    this.queryTempLocation = queryTempLocation;
-  }
-
-  /**
-   * @return the sortOrder
-   */
-  public byte[] getSortOrder() {
-    return sortOrder;
-  }
-
-  /**
-   * @param sortOrder the sortOrder to set
-   */
-  public void setSortOrder(byte[] sortOrder) {
-    this.sortOrder = sortOrder;
-  }
-
-  /**
-   * @return the sortDimension
-   */
-  public List<QueryDimension> getSortDimension() {
-    return sortDimension;
-  }
-
-  /**
-   * @param sortDimension the sortDimension to set
-   */
-  public void setSortDimension(List<QueryDimension> sortDimension) {
-    this.sortDimension = sortDimension;
-  }
-
-  /**
-   * @return the filterEvaluatorTree
-   */
-  public FilterResolverIntf getFilterExpressionResolverTree() {
-    return filterExpressionResolverTree;
-  }
-
-  public void setFilterExpressionResolverTree(FilterResolverIntf filterExpressionResolverTree) {
-    this.filterExpressionResolverTree = filterExpressionResolverTree;
-  }
-
-  /**
-   * @return the absoluteTableIdentifier
-   */
-  public AbsoluteTableIdentifier getAbsoluteTableIdentifier() {
-    return absoluteTableIdentifier;
-  }
-
-  /**
-   * @param absoluteTableIdentifier the absoluteTableIdentifier to set
-   */
-  public void setAbsoluteTableIdentifier(AbsoluteTableIdentifier absoluteTableIdentifier) {
-    this.absoluteTableIdentifier = absoluteTableIdentifier;
-  }
-
-  /**
-   * @return the paritionColumns
-   */
-  public List<String> getParitionColumns() {
-    return paritionColumns;
-  }
-
-  /**
-   * @param paritionColumns the paritionColumns to set
-   */
-  public void setParitionColumns(List<String> paritionColumns) {
-    this.paritionColumns = paritionColumns;
-  }
-
-  /**
-   * @return the table
-   */
-  public CarbonTable getTable() {
-    return table;
-  }
-
-  /**
-   * @param table the table to set
-   */
-  public void setTable(CarbonTable table) {
-    this.table = table;
-  }
-
-  public boolean isForcedDetailRawQuery() {
-    return forcedDetailRawQuery;
-  }
-
-  public void setForcedDetailRawQuery(boolean forcedDetailRawQuery) {
-    this.forcedDetailRawQuery = forcedDetailRawQuery;
-  }
-
-  /**
-   * @return
-   */
-  public Map<String, Dictionary> getColumnToDictionaryMapping() {
-    return columnToDictionaryMapping;
-  }
-
-  /**
-   * @param columnToDictionaryMapping
-   */
-  public void setColumnToDictionaryMapping(Map<String, Dictionary> columnToDictionaryMapping) {
-    this.columnToDictionaryMapping = columnToDictionaryMapping;
-  }
-
-  public int getInMemoryRecordSize() {
-    return inMemoryRecordSize;
-  }
-
-  public void setInMemoryRecordSize(int inMemoryRecordSize) {
-    this.inMemoryRecordSize = inMemoryRecordSize;
-  }
-
-  public QueryStatisticsRecorder getStatisticsRecorder() {
-    return statisticsRecorder;
-  }
-
-  public void setStatisticsRecorder(QueryStatisticsRecorder statisticsRecorder) {
-    this.statisticsRecorder = statisticsRecorder;
-  }
-}
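
Putting CarbonQueryPlan and QueryModel together, a driver-side caller might
build the executable model roughly as follows (a sketch; the helper class and
method are hypothetical, and the CarbonTable and block list for one split are
assumed to be available):

    import java.util.List;

    import org.carbondata.core.carbon.datastore.block.TableBlockInfo;
    import org.carbondata.core.carbon.metadata.schema.table.CarbonTable;

    public final class QueryModelBuilderSketch {
      // Builds the model that will be shipped to the executor for one split.
      public static QueryModel buildModel(CarbonTable carbonTable, CarbonQueryPlan plan,
          List<TableBlockInfo> blocksForSplit) {
        QueryModel model = QueryModel.createModel(
            carbonTable.getAbsoluteTableIdentifier(), plan, carbonTable);
        // the executor scans exactly these blocks
        model.setTableBlockInfos(blocksForSplit);
        return model;
      }
    }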

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/model/QuerySchemaInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/model/QuerySchemaInfo.java b/core/src/main/java/org/carbondata/scan/model/QuerySchemaInfo.java
deleted file mode 100644
index 643e81b..0000000
--- a/core/src/main/java/org/carbondata/scan/model/QuerySchemaInfo.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.model;
-
-import java.io.Serializable;
-
-import org.carbondata.core.keygenerator.KeyGenerator;
-
-public class QuerySchemaInfo implements Serializable {
-
-  private int[] maskedByteIndexes;
-
-  private KeyGenerator keyGenerator;
-
-  private QueryDimension[] queryDimensions;
-
-  private QueryMeasure[] queryMeasures;
-
-  private int[] queryOrder;
-
-  private int[] queryReverseOrder;
-
-  public int[] getMaskedByteIndexes() {
-    return maskedByteIndexes;
-  }
-
-  public void setMaskedByteIndexes(int[] maskedByteIndexes) {
-    this.maskedByteIndexes = maskedByteIndexes;
-  }
-
-  public KeyGenerator getKeyGenerator() {
-    return keyGenerator;
-  }
-
-  public void setKeyGenerator(KeyGenerator keyGenerator) {
-    this.keyGenerator = keyGenerator;
-  }
-
-  public QueryDimension[] getQueryDimensions() {
-    return queryDimensions;
-  }
-
-  public void setQueryDimensions(QueryDimension[] queryDimensions) {
-    this.queryDimensions = queryDimensions;
-  }
-
-  public QueryMeasure[] getQueryMeasures() {
-    return queryMeasures;
-  }
-
-  public void setQueryMeasures(QueryMeasure[] queryMeasures) {
-    this.queryMeasures = queryMeasures;
-  }
-
-  public int[] getQueryOrder() {
-    return queryOrder;
-  }
-
-  public void setQueryOrder(int[] queryOrder) {
-    this.queryOrder = queryOrder;
-  }
-
-  public int[] getQueryReverseOrder() {
-    return queryReverseOrder;
-  }
-
-  public void setQueryReverseOrder(int[] queryReverseOrder) {
-    this.queryReverseOrder = queryReverseOrder;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/model/SortOrderType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/model/SortOrderType.java b/core/src/main/java/org/carbondata/scan/model/SortOrderType.java
deleted file mode 100644
index 00b9219..0000000
--- a/core/src/main/java/org/carbondata/scan/model/SortOrderType.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.model;
-
-/**
- * enum for sorting the columns
- */
-public enum SortOrderType {
-
-  /**
-   * Ascending order
-   */
-  ASC(0),
-
-  /**
-   * Descending order.
-   */
-  DSC(1),
-
-  /**
-   * No order mentioned
-   */
-  NONE(-1);
-  /**
-   * Order type in numeric
-   */
-  private int orderType;
-
-  SortOrderType(int orderType) {
-    this.orderType = orderType;
-  }
-
-  /**
-   * Order type in number
-   *
-   * @return orderType int
-   */
-  public int getOrderType() {
-    return orderType;
-  }
-}
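
Note that QueryModel.fillSortInfoInModel above encodes the sort order using
ordinal(), not getOrderType(), so the encoded bytes are ASC -> 0, DSC -> 1,
NONE -> 2 (whereas getOrderType() gives NONE the value -1). A sketch of the
round trip:

    // Encoding as done in QueryModel.fillSortInfoInModel:
    byte encoded = (byte) SortOrderType.DSC.ordinal();  // 1

    // Decoding side (a sketch): map the byte back to the enum constant.
    SortOrderType decoded = SortOrderType.values()[encoded];
    assert decoded == SortOrderType.DSC;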

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/processor/AbstractDataBlockIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/processor/AbstractDataBlockIterator.java b/core/src/main/java/org/carbondata/scan/processor/AbstractDataBlockIterator.java
deleted file mode 100644
index 0454d92..0000000
--- a/core/src/main/java/org/carbondata/scan/processor/AbstractDataBlockIterator.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.processor;
-
-import java.util.List;
-
-import org.carbondata.common.CarbonIterator;
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.datastore.DataRefNode;
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.scan.collector.ScannedResultCollector;
-import org.carbondata.scan.collector.impl.DictionaryBasedResultCollector;
-import org.carbondata.scan.collector.impl.RawBasedResultCollector;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.executor.infos.BlockExecutionInfo;
-import org.carbondata.scan.result.AbstractScannedResult;
-import org.carbondata.scan.scanner.BlockletScanner;
-import org.carbondata.scan.scanner.impl.FilterScanner;
-import org.carbondata.scan.scanner.impl.NonFilterScanner;
-
-/**
- * This abstract class provides a skeletal implementation of the
- * Block iterator.
- */
-public abstract class AbstractDataBlockIterator extends CarbonIterator<List<Object[]>> {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(AbstractDataBlockIterator.class.getName());
-  /**
-   * iterator which will be used to iterate over data blocks
-   */
-  protected CarbonIterator<DataRefNode> dataBlockIterator;
-
-  /**
-   * execution details
-   */
-  protected BlockExecutionInfo blockExecutionInfo;
-
-  /**
-   * result collector which will be used to aggregate the scanned result
-   */
-  protected ScannedResultCollector scannerResultAggregator;
-
-  /**
-   * processor which will be used to process the block;
-   * processing can be filter processing or non-filter processing
-   */
-  protected BlockletScanner blockletScanner;
-
-  /**
-   * to hold the data block
-   */
-  protected BlocksChunkHolder blocksChunkHolder;
-
-  /**
-   * batch size of result
-   */
-  protected int batchSize;
-
-  protected AbstractScannedResult scannedResult;
-
-  public AbstractDataBlockIterator(BlockExecutionInfo blockExecutionInfo,
-      FileHolder fileReader, int batchSize) {
-    this.blockExecutionInfo = blockExecutionInfo;
-    dataBlockIterator = new BlockletIterator(blockExecutionInfo.getFirstDataBlock(),
-        blockExecutionInfo.getNumberOfBlockToScan());
-    blocksChunkHolder = new BlocksChunkHolder(blockExecutionInfo.getTotalNumberDimensionBlock(),
-        blockExecutionInfo.getTotalNumberOfMeasureBlock());
-    blocksChunkHolder.setFileReader(fileReader);
-
-    if (blockExecutionInfo.getFilterExecuterTree() != null) {
-      blockletScanner = new FilterScanner(blockExecutionInfo);
-    } else {
-      blockletScanner = new NonFilterScanner(blockExecutionInfo);
-    }
-    if (blockExecutionInfo.isRawRecordDetailQuery()) {
-      this.scannerResultAggregator =
-          new RawBasedResultCollector(blockExecutionInfo);
-    } else {
-      this.scannerResultAggregator =
-          new DictionaryBasedResultCollector(blockExecutionInfo);
-    }
-    this.batchSize = batchSize;
-  }
-
-  public boolean hasNext() {
-    if (scannedResult != null && scannedResult.hasNext()) {
-      return true;
-    } else {
-      return dataBlockIterator.hasNext();
-    }
-  }
-
-  protected boolean updateScanner() {
-    try {
-      if (scannedResult != null && scannedResult.hasNext()) {
-        return true;
-      } else {
-        scannedResult = getNextScannedResult();
-        while (scannedResult != null) {
-          if (scannedResult.hasNext()) {
-            return true;
-          }
-          scannedResult = getNextScannedResult();
-        }
-        return false;
-      }
-    } catch (QueryExecutionException ex) {
-      throw new RuntimeException(ex);
-    }
-  }
-
-  private AbstractScannedResult getNextScannedResult() throws QueryExecutionException {
-    if (dataBlockIterator.hasNext()) {
-      blocksChunkHolder.setDataBlock(dataBlockIterator.next());
-      blocksChunkHolder.reset();
-      return blockletScanner.scanBlocklet(blocksChunkHolder);
-    }
-    return null;
-  }
-
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/processor/BlockletIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/processor/BlockletIterator.java b/core/src/main/java/org/carbondata/scan/processor/BlockletIterator.java
deleted file mode 100644
index 7cecf64..0000000
--- a/core/src/main/java/org/carbondata/scan/processor/BlockletIterator.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.processor;
-
-import org.carbondata.common.CarbonIterator;
-import org.carbondata.core.carbon.datastore.DataRefNode;
-
-/**
- * Below class will be used to iterate over data block
- */
-public class BlockletIterator extends CarbonIterator<DataRefNode> {
-  /**
-   * data store block
-   */
-  protected DataRefNode datablock;
-  /**
-   * block counter to keep track of how many blocks have been processed
-   */
-  private int blockCounter;
-
-  /**
-   * flag used to check whether any more data blocks are present
-   */
-  private boolean hasNext = true;
-
-  /**
-   * total number of blocks assigned to this iterator
-   */
-  private long totalNumberOfBlocksToScan;
-
-  /**
-   * Constructor
-   *
-   * @param datablock                 first data block
-   * @param totalNumberOfBlocksToScan total number of blocks to be scanned
-   */
-  public BlockletIterator(DataRefNode datablock, long totalNumberOfBlocksToScan) {
-    this.datablock = datablock;
-    this.totalNumberOfBlocksToScan = totalNumberOfBlocksToScan;
-  }
-
-  /**
-   * whether any blocks assigned to this iterator are still left to process
-   */
-  @Override public boolean hasNext() {
-    return hasNext;
-  }
-
-  /**
-   * To get the next block
-   *
-   * @return next data block
-   */
-  @Override public DataRefNode next() {
-    // get the current blocks
-    DataRefNode datablockTemp = datablock;
-    // store the next data block
-    datablock = datablock.getNextDataRefNode();
-    // increment the counter
-    blockCounter++;
-    // if all the data blocks have been processed, or the number of
-    // blocks assigned to this iterator has been reached, set the
-    // hasNext flag to false
-    if (null == datablock || blockCounter >= this.totalNumberOfBlocksToScan) {
-      hasNext = false;
-    }
-    return datablockTemp;
-  }
-}
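
A short usage sketch (firstBlock is a hypothetical DataRefNode at the head of
the block chain): the iterator follows getNextDataRefNode() links and stops at
the end of the chain or after the assigned number of blocks, whichever comes
first.

    BlockletIterator blocklets = new BlockletIterator(firstBlock, 2);
    while (blocklets.hasNext()) {
      DataRefNode block = blocklets.next();  // firstBlock, then its successor
      // scan `block` ...
    }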

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/processor/BlocksChunkHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/processor/BlocksChunkHolder.java b/core/src/main/java/org/carbondata/scan/processor/BlocksChunkHolder.java
deleted file mode 100644
index bbf0cd9..0000000
--- a/core/src/main/java/org/carbondata/scan/processor/BlocksChunkHolder.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.processor;
-
-import org.carbondata.core.carbon.datastore.DataRefNode;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
-import org.carbondata.core.datastorage.store.FileHolder;
-
-/**
- * Block chunk holder which will hold the dimension and
- * measure chunks
- */
-public class BlocksChunkHolder {
-
-  /**
-   * dimension column data chunk
-   */
-  private DimensionColumnDataChunk[] dimensionDataChunk;
-
-  /**
-   * measure column data chunk
-   */
-  private MeasureColumnDataChunk[] measureDataChunk;
-
-  /**
-   * file reader which will be used to read the block from the file
-   */
-  private FileHolder fileReader;
-
-  /**
-   * data block
-   */
-  private DataRefNode dataBlock;
-
-  public BlocksChunkHolder(int numberOfDimensionBlock, int numberOfMeasureBlock) {
-    dimensionDataChunk = new DimensionColumnDataChunk[numberOfDimensionBlock];
-    measureDataChunk = new MeasureColumnDataChunk[numberOfMeasureBlock];
-  }
-
-  /**
-   * @return the dimensionDataChunk
-   */
-  public DimensionColumnDataChunk[] getDimensionDataChunk() {
-    return dimensionDataChunk;
-  }
-
-  /**
-   * @param dimensionDataChunk the dimensionDataChunk to set
-   */
-  public void setDimensionDataChunk(DimensionColumnDataChunk[] dimensionDataChunk) {
-    this.dimensionDataChunk = dimensionDataChunk;
-  }
-
-  /**
-   * @return the measureDataChunk
-   */
-  public MeasureColumnDataChunk[] getMeasureDataChunk() {
-    return measureDataChunk;
-  }
-
-  /**
-   * @param measureDataChunk the measureDataChunk to set
-   */
-  public void setMeasureDataChunk(MeasureColumnDataChunk[] measureDataChunk) {
-    this.measureDataChunk = measureDataChunk;
-  }
-
-  /**
-   * @return the fileReader
-   */
-  public FileHolder getFileReader() {
-    return fileReader;
-  }
-
-  /**
-   * @param fileReader the fileReader to set
-   */
-  public void setFileReader(FileHolder fileReader) {
-    this.fileReader = fileReader;
-  }
-
-  /**
-   * @return the dataBlock
-   */
-  public DataRefNode getDataBlock() {
-    return dataBlock;
-  }
-
-  /**
-   * @param dataBlock the dataBlock to set
-   */
-  public void setDataBlock(DataRefNode dataBlock) {
-    this.dataBlock = dataBlock;
-  }
-
-  /**
-   * To reset the measure chunk and dimension chunk
-   * arrays
-   */
-  public void reset() {
-    for (int i = 0; i < measureDataChunk.length; i++) {
-      this.measureDataChunk[i] = null;
-    }
-    for (int i = 0; i < dimensionDataChunk.length; i++) {
-      this.dimensionDataChunk[i] = null;
-    }
-  }
-}
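
The holder is a per-iterator scratch object; a sketch of the reuse pattern
from AbstractDataBlockIterator.getNextScannedResult() above (the surrounding
method and its parameters are hypothetical):

    static void scanAllBlocklets(CarbonIterator<DataRefNode> dataBlocks,
        BlockletScanner blockletScanner, FileHolder fileReader,
        int dimensionBlockCount, int measureBlockCount) throws QueryExecutionException {
      BlocksChunkHolder holder = new BlocksChunkHolder(dimensionBlockCount, measureBlockCount);
      holder.setFileReader(fileReader);
      while (dataBlocks.hasNext()) {
        holder.setDataBlock(dataBlocks.next());
        holder.reset();  // drop the chunks cached from the previous block
        // scanBlocklet refills the dimension/measure chunk arrays and
        // returns the scanned result for this blocklet
        AbstractScannedResult result = blockletScanner.scanBlocklet(holder);
        // ... consume `result` ...
      }
    }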

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/processor/impl/DataBlockIteratorImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/processor/impl/DataBlockIteratorImpl.java b/core/src/main/java/org/carbondata/scan/processor/impl/DataBlockIteratorImpl.java
deleted file mode 100644
index 2471666..0000000
--- a/core/src/main/java/org/carbondata/scan/processor/impl/DataBlockIteratorImpl.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.processor.impl;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.scan.executor.infos.BlockExecutionInfo;
-import org.carbondata.scan.processor.AbstractDataBlockIterator;
-
-/**
- * Below class will be used to process the block for detail query
- */
-public class DataBlockIteratorImpl extends AbstractDataBlockIterator {
-
-  /**
-   * DataBlockIteratorImpl Constructor
-   *
-   * @param blockExecutionInfo execution information
-   */
-  public DataBlockIteratorImpl(BlockExecutionInfo blockExecutionInfo, FileHolder fileReader,
-      int batchSize) {
-    super(blockExecutionInfo, fileReader, batchSize);
-  }
-
-  /**
-   * It scans the block and returns the result in batches of at most batchSize rows.
-   *
-   * @return a result batch of at most batchSize rows
-   */
-  public List<Object[]> next() {
-    List<Object[]> collectedResult = null;
-    if (updateScanner()) {
-      collectedResult = this.scannerResultAggregator.collectData(scannedResult, batchSize);
-      while (collectedResult.size() < batchSize && updateScanner()) {
-        List<Object[]> data = this.scannerResultAggregator
-            .collectData(scannedResult, batchSize - collectedResult.size());
-        collectedResult.addAll(data);
-      }
-    } else {
-      collectedResult = new ArrayList<>();
-    }
-    return collectedResult;
-  }
-
-}
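
Consumption sketch: the caller simply drains the iterator, and each next()
call returns at most batchSize rows, possibly collected across several
blocklets as implemented above (blockExecutionInfo, fileReader and batchSize
are assumed to be available):

    DataBlockIteratorImpl dataBlockIterator =
        new DataBlockIteratorImpl(blockExecutionInfo, fileReader, batchSize);
    while (dataBlockIterator.hasNext()) {
      List<Object[]> batch = dataBlockIterator.next();  // at most batchSize rows
      for (Object[] row : batch) {
        // hand the row to the upper-level result iterator
      }
    }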

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/result/AbstractScannedResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/result/AbstractScannedResult.java b/core/src/main/java/org/carbondata/scan/result/AbstractScannedResult.java
deleted file mode 100644
index d917c2a..0000000
--- a/core/src/main/java/org/carbondata/scan/result/AbstractScannedResult.java
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.result;
-
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.math.BigDecimal;
-import java.util.Map;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.scan.executor.infos.BlockExecutionInfo;
-import org.carbondata.scan.executor.infos.KeyStructureInfo;
-import org.carbondata.scan.filter.GenericQueryType;
-
-/**
- * Scanned result class which will store and provide the result on request
- */
-public abstract class AbstractScannedResult {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(AbstractScannedResult.class.getName());
-  /**
-   * current row number
-   */
-  protected int currentRow = -1;
-  /**
-   * row mapping indexes
-   */
-  protected int[] rowMapping;
-  /**
-   * key size of the fixed length column
-   */
-  private int fixedLengthKeySize;
-  /**
-   * total number of rows
-   */
-  private int totalNumberOfRows;
-  /**
-   * to keep track of the number of rows processed
-   */
-  private int rowCounter;
-  /**
-   * dimension column data chunk
-   */
-  private DimensionColumnDataChunk[] dataChunks;
-  /**
-   * measure column data chunk
-   */
-  private MeasureColumnDataChunk[] measureDataChunks;
-  /**
-   * dictionary column block index in file
-   */
-  private int[] dictionaryColumnBlockIndexes;
-
-  /**
-   * no dictionary column block index in file
-   */
-  private int[] noDictionaryColumnBlockIndexes;
-
-  /**
-   * column group to key structure info mapping,
-   * which will be used to extract a key from the complete
-   * column group key.
-   * For example, if only one dimension of the column group is selected,
-   * it will be used to mask the complete column group key and
-   * get that particular column's key
-   */
-  private Map<Integer, KeyStructureInfo> columnGroupKeyStructureInfo;
-
-  /**
-   * mapping of complex dimension parent block index to its query type
-   */
-  private Map<Integer, GenericQueryType> complexParentIndexToQueryMap;
-
-  private int totalDimensionsSize;
-
-  /**
-   * parent block indexes
-   */
-  private int[] complexParentBlockIndexes;
-
-  public AbstractScannedResult(BlockExecutionInfo blockExecutionInfo) {
-    this.fixedLengthKeySize = blockExecutionInfo.getFixedLengthKeySize();
-    this.noDictionaryColumnBlockIndexes = blockExecutionInfo.getNoDictionaryBlockIndexes();
-    this.dictionaryColumnBlockIndexes = blockExecutionInfo.getDictionaryColumnBlockIndex();
-    this.columnGroupKeyStructureInfo = blockExecutionInfo.getColumnGroupToKeyStructureInfo();
-    this.complexParentIndexToQueryMap = blockExecutionInfo.getComlexDimensionInfoMap();
-    this.complexParentBlockIndexes = blockExecutionInfo.getComplexColumnParentBlockIndexes();
-    this.totalDimensionsSize = blockExecutionInfo.getQueryDimensions().length;
-  }
-
-  /**
-   * Below method will be used to set the dimension chunks
-   * which will be used to create a row
-   *
-   * @param dataChunks dimension chunks used in query
-   */
-  public void setDimensionChunks(DimensionColumnDataChunk[] dataChunks) {
-    this.dataChunks = dataChunks;
-  }
-
-  /**
-   * Below method will be used to set the measure column chunks
-   *
-   * @param measureDataChunks measure data chunks
-   */
-  public void setMeasureChunks(MeasureColumnDataChunk[] measureDataChunks) {
-    this.measureDataChunks = measureDataChunks;
-  }
-
-  /**
-   * Below method will be used to get the chunk based on measure ordinal
-   *
-   * @param ordinal measure ordinal
-   * @return measure column chunk
-   */
-  public MeasureColumnDataChunk getMeasureChunk(int ordinal) {
-    return measureDataChunks[ordinal];
-  }
-
-  /**
-   * Below method will be used to get the key for all the dictionary dimensions
-   * which are present in the query
-   *
-   * @param rowId row id selected after scanning
-   * @return return the dictionary key
-   */
-  protected byte[] getDictionaryKeyArray(int rowId) {
-    byte[] completeKey = new byte[fixedLengthKeySize];
-    int offset = 0;
-    for (int i = 0; i < this.dictionaryColumnBlockIndexes.length; i++) {
-      offset += dataChunks[dictionaryColumnBlockIndexes[i]]
-          .fillChunkData(completeKey, offset, rowId,
-              columnGroupKeyStructureInfo.get(dictionaryColumnBlockIndexes[i]));
-    }
-    rowCounter++;
-    return completeKey;
-  }
-
-  /**
-   * Below method will be used to get the key for all the dictionary dimensions
-   * present in the query, in integer array format
-   *
-   * @param rowId row id selected after scanning
-   * @return return the dictionary key
-   */
-  protected int[] getDictionaryKeyIntegerArray(int rowId) {
-    int[] completeKey = new int[totalDimensionsSize];
-    int column = 0;
-    for (int i = 0; i < this.dictionaryColumnBlockIndexes.length; i++) {
-      column = dataChunks[dictionaryColumnBlockIndexes[i]]
-          .fillConvertedChunkData(rowId, column, completeKey,
-              columnGroupKeyStructureInfo.get(dictionaryColumnBlockIndexes[i]));
-    }
-    rowCounter++;
-    return completeKey;
-  }
-
-  /**
-   * Just increment the counter in case of a query only on measures.
-   */
-  public void incrementCounter() {
-    rowCounter ++;
-    currentRow ++;
-  }
-
-  /**
-   * Below method will be used to get the dimension data based on dimension
-   * ordinal and index
-   *
-   * @param dimOrdinal dimension ordinal present in the query
-   * @param rowId      row index
-   * @return dimension data based on row id
-   */
-  protected byte[] getDimensionData(int dimOrdinal, int rowId) {
-    return dataChunks[dimOrdinal].getChunkData(rowId);
-  }
-
-  /**
-   * Below method will be used to get the dimension key array
-   * for all the no dictionary dimensions present in the query
-   *
-   * @param rowId row number
-   * @return no dictionary keys for all no dictionary dimension
-   */
-  protected byte[][] getNoDictionaryKeyArray(int rowId) {
-    byte[][] noDictionaryColumnsKeys = new byte[noDictionaryColumnBlockIndexes.length][];
-    int position = 0;
-    for (int i = 0; i < this.noDictionaryColumnBlockIndexes.length; i++) {
-      noDictionaryColumnsKeys[position++] =
-          dataChunks[noDictionaryColumnBlockIndexes[i]].getChunkData(rowId);
-    }
-    return noDictionaryColumnsKeys;
-  }
-
-  /**
-   * Below method will be used to get the dimension key array
-   * for all the no dictionary dimension present in the query
-   *
-   * @param rowId row number
-   * @return no dictionary keys for all no dictionary dimension
-   */
-  protected String[] getNoDictionaryKeyStringArray(int rowId) {
-    String[] noDictionaryColumnsKeys = new String[noDictionaryColumnBlockIndexes.length];
-    int position = 0;
-    for (int i = 0; i < this.noDictionaryColumnBlockIndexes.length; i++) {
-      noDictionaryColumnsKeys[position++] =
-          new String(dataChunks[noDictionaryColumnBlockIndexes[i]].getChunkData(rowId));
-    }
-    return noDictionaryColumnsKeys;
-  }
-
-  /**
-   * Below method will be used to get the complex type keys array based
-   * on row id for all the complex type dimension selected in query
-   *
-   * @param rowId row number
-   * @return complex type key array for all the complex dimension selected in query
-   */
-  protected byte[][] getComplexTypeKeyArray(int rowId) {
-    byte[][] complexTypeData = new byte[complexParentBlockIndexes.length][];
-    for (int i = 0; i < complexTypeData.length; i++) {
-      GenericQueryType genericQueryType =
-          complexParentIndexToQueryMap.get(complexParentBlockIndexes[i]);
-      ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
-      DataOutputStream dataOutput = new DataOutputStream(byteStream);
-      try {
-        genericQueryType.parseBlocksAndReturnComplexColumnByteArray(dataChunks, rowId, dataOutput);
-        complexTypeData[i] = byteStream.toByteArray();
-      } catch (IOException e) {
-        LOGGER.error(e);
-      } finally {
-        CarbonUtil.closeStreams(dataOutput);
-        CarbonUtil.closeStreams(byteStream);
-      }
-    }
-    return complexTypeData;
-  }
-
-  /**
-   * @return the total number of rows after scanning
-   */
-  public int numberOfOutputRows() {
-    return this.totalNumberOfRows;
-  }
-
-  /**
-   * to check whether any more rows are present in the result
-   *
-   * @return
-   */
-  public boolean hasNext() {
-    return rowCounter < this.totalNumberOfRows;
-  }
-
-  /**
-   * As this class is a flyweight object, all blocklet scans for one block
-   * reuse the same result object, so the counters need to be reset to zero
-   * so that each new result is read from the beginning
-   */
-  public void reset() {
-    rowCounter = 0;
-    currentRow = -1;
-  }
-
-  /**
-   * @param totalNumberOfRows total number of rows valid after scanning
-   */
-  public void setNumberOfRows(int totalNumberOfRows) {
-    this.totalNumberOfRows = totalNumberOfRows;
-  }
-
-  /**
-   * After applying the filter, a bit set with the valid row indexes is
-   * produced; below method will be used to set those row indexes
-   *
-   * @param indexes
-   */
-  public void setIndexes(int[] indexes) {
-    this.rowMapping = indexes;
-  }
-
-  /**
-   * Below method will be used to check whether measure value is null or not
-   *
-   * @param ordinal  measure ordinal
-   * @param rowIndex row number to be checked
-   * @return whether it is null or not
-   */
-  protected boolean isNullMeasureValue(int ordinal, int rowIndex) {
-    return measureDataChunks[ordinal].getNullValueIndexHolder().getBitSet().get(rowIndex);
-  }
-
-  /**
-   * Below method will be used to get the measure value of
-   * long type
-   *
-   * @param ordinal  measure ordinal
-   * @param rowIndex row number of the measure value
-   * @return measure value of long type
-   */
-  protected long getLongMeasureValue(int ordinal, int rowIndex) {
-    return measureDataChunks[ordinal].getMeasureDataHolder().getReadableLongValueByIndex(rowIndex);
-  }
-
-  /**
-   * Below method will be used to get the measure value of double type
-   *
-   * @param ordinal  measure ordinal
-   * @param rowIndex row number
-   * @return measure value of double type
-   */
-  protected double getDoubleMeasureValue(int ordinal, int rowIndex) {
-    return measureDataChunks[ordinal].getMeasureDataHolder()
-        .getReadableDoubleValueByIndex(rowIndex);
-  }
-
-  /**
-   * Below method will be used to get the measure value of big decimal data type
-   *
-   * @param ordinal  ordinal of the measure
-   * @param rowIndex row number
-   * @return measure of big decimal type
-   */
-  protected BigDecimal getBigDecimalMeasureValue(int ordinal, int rowIndex) {
-    return measureDataChunks[ordinal].getMeasureDataHolder()
-        .getReadableBigDecimalValueByIndex(rowIndex);
-  }
-
-  /**
-   * will return the current valid row id
-   *
-   * @return valid row id
-   */
-  public abstract int getCurrenrRowId();
-
-  /**
-   * @return dictionary key array for all the dictionary dimension
-   * selected in query
-   */
-  public abstract byte[] getDictionaryKeyArray();
-
-  /**
-   * @return dictionary key array for all the dictionary dimension in integer array format
-   * selected in query
-   */
-  public abstract int[] getDictionaryKeyIntegerArray();
-
-  /**
-   * Return the dimension data based on dimension ordinal
-   *
-   * @param dimensionOrdinal dimension ordinal
-   * @return dimension data
-   */
-  public abstract byte[] getDimensionKey(int dimensionOrdinal);
-
-  /**
-   * Below method will be used to get the complex type key array
-   *
-   * @return complex type key array
-   */
-  public abstract byte[][] getComplexTypeKeyArray();
-
-  /**
-   * Below method will be used to get the no dictionary key
-   * array for all the no dictionary dimension selected in query
-   *
-   * @return no dictionary key array for all the no dictionary dimension
-   */
-  public abstract byte[][] getNoDictionaryKeyArray();
-
-  /**
-   * Below method will be used to get the no dictionary key
-   * array in string array format for all the no dictionary dimension selected in query
-   *
-   * @return no dictionary key array for all the no dictionary dimension
-   */
-  public abstract String[] getNoDictionaryKeyStringArray();
-
-  /**
-   * Below method will be used to check whether the measure value
-   * is null or not for a measure
-   *
-   * @param ordinal measure ordinal
-   * @return is null or not
-   */
-  public abstract boolean isNullMeasureValue(int ordinal);
-
-  /**
-   * Below method will be used to get the measure value for measure
-   * of long data type
-   *
-   * @param ordinal measure ordinal
-   * @return long value of measure
-   */
-  public abstract long getLongMeasureValue(int ordinal);
-
-  /**
-   * Below method will be used to get the value of measure of double
-   * type
-   *
-   * @param ordinal measure ordinal
-   * @return measure value
-   */
-  public abstract double getDoubleMeasureValue(int ordinal);
-
-  /**
-   * Below method will be used to get the data of big decimal type
-   * of a measure
-   *
-   * @param ordinal measure ordinal
-   * @return measure value
-   */
-  public abstract BigDecimal getBigDecimalMeasureValue(int ordinal);
-}
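
For reference, the dictionary-key assembly in getDictionaryKeyArray above is a plain pack-bytes-into-a-fixed-width-buffer loop. A minimal, self-contained sketch of that pattern, using hypothetical data rather than the CarbonData chunk classes:

import java.nio.ByteBuffer;

// Illustration only: packs per-column byte chunks into one fixed-length
// key, mirroring the offset-advancing loop in getDictionaryKeyArray above.
public class KeyPackingSketch {
  public static void main(String[] args) {
    byte[][] columnChunks = { { 1, 2 }, { 3 }, { 4, 5, 6 } }; // one array per dictionary column
    int fixedLengthKeySize = 6;                               // sum of the column widths
    ByteBuffer completeKey = ByteBuffer.allocate(fixedLengthKeySize);
    for (byte[] chunk : columnChunks) {
      completeKey.put(chunk); // each put advances the offset by chunk.length
    }
    System.out.println(java.util.Arrays.toString(completeKey.array())); // [1, 2, 3, 4, 5, 6]
  }
}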

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/result/BatchResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/result/BatchResult.java b/core/src/main/java/org/carbondata/scan/result/BatchResult.java
deleted file mode 100644
index 456717e..0000000
--- a/core/src/main/java/org/carbondata/scan/result/BatchResult.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.result;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.NoSuchElementException;
-
-import org.carbondata.common.CarbonIterator;
-
-/**
- * Below class holds the query result
- */
-public class BatchResult extends CarbonIterator<Object[]> {
-
-  /**
-   * list of keys
-   */
-  protected List<Object[]> rows;
-
-  /**
-   * counter to check whether all the records are processed or not
-   */
-  protected int counter;
-
-  public BatchResult() {
-    this.rows = new ArrayList<>();
-  }
-
-  /**
-   * Below method will be used to get the rows
-   *
-   * @return
-   */
-  public List<Object[]> getRows() {
-    return rows;
-  }
-
-  /**
-   * Below method will be used to set the rows
-   *
-   * @param rows
-   */
-  public void setRows(List<Object[]> rows) {
-    this.rows = rows;
-  }
-
-  /**
-   * This method will return one row at a time based on the counter given.
-   * @param counter
-   * @return
-   */
-  public Object[] getRawRow(int counter) {
-    return rows.get(counter);
-  }
-
-  /**
-   * For getting the total size.
-   * @return
-   */
-  public int getSize() {
-    return rows.size();
-  }
-
-
-  /**
-   * Returns {@code true} if the iteration has more elements.
-   *
-   * @return {@code true} if the iteration has more elements
-   */
-  @Override public boolean hasNext() {
-    return counter < rows.size();
-  }
-
-  /**
-   * Returns the next element in the iteration.
-   *
-   * @return the next element in the iteration
-   */
-  @Override public Object[] next() {
-    if (!hasNext()) {
-      throw new NoSuchElementException();
-    }
-    Object[] row = rows.get(counter);
-    counter++;
-    return row;
-  }
-}
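
A short usage sketch for BatchResult, assuming the pre-rename class above is on the classpath: it behaves as a plain iterator over Object[] rows with an internal counter.

import java.util.Arrays;
import java.util.List;

import org.carbondata.scan.result.BatchResult;

// Usage sketch only: BatchResult is a plain iterator over Object[] rows.
public class BatchResultUsageSketch {
  public static void main(String[] args) {
    List<Object[]> rows = Arrays.<Object[]>asList(
        new Object[] { "row1", 10L },
        new Object[] { "row2", 20L });
    BatchResult batch = new BatchResult();
    batch.setRows(rows);
    while (batch.hasNext()) {
      System.out.println(Arrays.toString(batch.next())); // [row1, 10] then [row2, 20]
    }
  }
}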

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/result/Result.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/result/Result.java b/core/src/main/java/org/carbondata/scan/result/Result.java
deleted file mode 100644
index 98466bb..0000000
--- a/core/src/main/java/org/carbondata/scan/result/Result.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.result;
-
-import org.carbondata.scan.wrappers.ByteArrayWrapper;
-
-/**
- * Result interface for storing the result
- */
-public interface Result<K, V> {
-  /**
-   * Below method will be used to
-   * add the scanned result
-   *
-   * @param result
-   */
-  void addScannedResult(K result);
-
-  /**
-   * Returns {@code true} if the iteration has more elements.
-   *
-   * @return {@code true} if the iteration has more elements
-   */
-  boolean hasNext();
-
-  /**
-   * Below method will return the result key
-   *
-   * @return key
-   */
-  ByteArrayWrapper getKey();
-
-  /**
-   * Below code will return the result value
-   *
-   * @return value
-   */
-  V[] getValue();
-
-  void merge(Result<K, V> otherResult);
-
-  /**
-   * Below method will be used to get the result
-   *
-   * @return
-   */
-  K getResult();
-
-  /**
-   * @return size of the result
-   */
-  int size();
-}
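
A hedged consumer sketch for the Result interface above. The concrete implementation supplying the instance is assumed, and how getKey()/getValue() advance the cursor is left to that implementation in this revision:

import org.carbondata.scan.result.Result;
import org.carbondata.scan.wrappers.ByteArrayWrapper;

// Hypothetical consumer: drains a scanned result pair by pair.
public class ResultDrainSketch {
  static void drain(Result<ByteArrayWrapper, Object> scannedResult) {
    while (scannedResult.hasNext()) {
      ByteArrayWrapper key = scannedResult.getKey(); // current key
      Object[] values = scannedResult.getValue();    // values for that key
      // ... process key and values ...
    }
  }
}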

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/result/impl/FilterQueryScannedResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/result/impl/FilterQueryScannedResult.java b/core/src/main/java/org/carbondata/scan/result/impl/FilterQueryScannedResult.java
deleted file mode 100644
index e519f9e..0000000
--- a/core/src/main/java/org/carbondata/scan/result/impl/FilterQueryScannedResult.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.result.impl;
-
-import java.math.BigDecimal;
-
-import org.carbondata.scan.executor.infos.BlockExecutionInfo;
-import org.carbondata.scan.result.AbstractScannedResult;
-
-/**
- * Result provider class in case of filter query
- * In case of filter query data will be sent
- * based on the filtered row index
- */
-public class FilterQueryScannedResult extends AbstractScannedResult {
-
-  public FilterQueryScannedResult(BlockExecutionInfo tableBlockExecutionInfos) {
-    super(tableBlockExecutionInfos);
-  }
-
-  /**
-   * @return dictionary key array for all the dictionary dimension
-   * selected in query
-   */
-  @Override public byte[] getDictionaryKeyArray() {
-    ++currentRow;
-    return getDictionaryKeyArray(rowMapping[currentRow]);
-  }
-
-  /**
-   * @return dictionary key integer array for all the dictionary dimension
-   * selected in query
-   */
-  @Override public int[] getDictionaryKeyIntegerArray() {
-    ++currentRow;
-    return getDictionaryKeyIntegerArray(rowMapping[currentRow]);
-  }
-
-  /**
-   * Below method will be used to get the complex type key array
-   *
-   * @return complex type key array
-   */
-  @Override public byte[][] getComplexTypeKeyArray() {
-    return getComplexTypeKeyArray(rowMapping[currentRow]);
-  }
-
-  /**
-   * Below method will be used to get the no dictionary key
-   * array for all the no dictionary dimension selected in query
-   *
-   * @return no dictionary key array for all the no dictionary dimension
-   */
-  @Override public byte[][] getNoDictionaryKeyArray() {
-    return getNoDictionaryKeyArray(rowMapping[currentRow]);
-  }
-
-  /**
-   * Below method will be used to get the no dictionary key
-   * string array for all the no dictionary dimension selected in query
-   *
-   * @return no dictionary key array for all the no dictionary dimension
-   */
-  @Override public String[] getNoDictionaryKeyStringArray() {
-    return getNoDictionaryKeyStringArray(rowMapping[currentRow]);
-  }
-
-  /**
-   * will return the current valid row id
-   *
-   * @return valid row id
-   */
-  @Override public int getCurrenrRowId() {
-    return rowMapping[currentRow];
-  }
-
-  /**
-   * Return the dimension data based on dimension ordinal
-   *
-   * @param dimensionOrdinal dimension ordinal
-   * @return dimension data
-   */
-  @Override public byte[] getDimensionKey(int dimensionOrdinal) {
-    return getDimensionData(dimensionOrdinal, rowMapping[currentRow]);
-  }
-
-  /**
-   * Below method will be used to check whether the measure value
-   * is null or not for a measure
-   *
-   * @param ordinal measure ordinal
-   * @return is null or not
-   */
-  @Override public boolean isNullMeasureValue(int ordinal) {
-    return isNullMeasureValue(ordinal, rowMapping[currentRow]);
-  }
-
-  /**
-   * Below method will be used to get the measure value for measure
-   * of long data type
-   *
-   * @param ordinal measure ordinal
-   * @return long value of measure
-   */
-  @Override public long getLongMeasureValue(int ordinal) {
-    return getLongMeasureValue(ordinal, rowMapping[currentRow]);
-  }
-
-  /**
-   * Below method will be used to get the value of measure of double
-   * type
-   *
-   * @param ordinal measure ordinal
-   * @return measure value
-   */
-  @Override public double getDoubleMeasureValue(int ordinal) {
-    return getDoubleMeasureValue(ordinal, rowMapping[currentRow]);
-  }
-
-  /**
-   * Below method will be used to get the data of big decimal type
-   * of a measure
-   *
-   * @param ordinal measure ordinal
-   * @return measure value
-   */
-  @Override public BigDecimal getBigDecimalMeasureValue(int ordinal) {
-    return getBigDecimalMeasureValue(ordinal, rowMapping[currentRow]);
-  }
-
-}
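
The only difference from the NonFilterQueryScannedResult that follows is one level of indirection: every accessor above resolves the current row through rowMapping, the row indexes that survived the filter. A standalone sketch of that pattern with hypothetical data:

// Illustration only: a filtered result reads rows through the surviving-row
// index array, exactly as rowMapping is used above, while the non-filter
// variant walks currentRow sequentially.
public class RowMappingSketch {
  public static void main(String[] args) {
    String[] blockletRows = { "r0", "r1", "r2", "r3", "r4" };
    int[] rowMapping = { 1, 3 };  // row indexes that survived the filter
    int currentRow = -1;          // same starting convention as reset() above
    while (currentRow + 1 < rowMapping.length) {
      ++currentRow;
      System.out.println(blockletRows[rowMapping[currentRow]]); // r1, then r3
    }
  }
}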

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/result/impl/NonFilterQueryScannedResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/result/impl/NonFilterQueryScannedResult.java b/core/src/main/java/org/carbondata/scan/result/impl/NonFilterQueryScannedResult.java
deleted file mode 100644
index 13fa860..0000000
--- a/core/src/main/java/org/carbondata/scan/result/impl/NonFilterQueryScannedResult.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.result.impl;
-
-import java.math.BigDecimal;
-
-import org.carbondata.scan.executor.infos.BlockExecutionInfo;
-import org.carbondata.scan.result.AbstractScannedResult;
-
-/**
- * Result provider class for non filter query
- * In case of no filter query we need to return
- * complete data
- */
-public class NonFilterQueryScannedResult extends AbstractScannedResult {
-
-  public NonFilterQueryScannedResult(BlockExecutionInfo blockExecutionInfo) {
-    super(blockExecutionInfo);
-  }
-
-  /**
-   * @return dictionary key array for all the dictionary dimension selected in
-   * query
-   */
-  @Override public byte[] getDictionaryKeyArray() {
-    ++currentRow;
-    return getDictionaryKeyArray(currentRow);
-  }
-
-  /**
-   * @return dictionary key integer array for all the dictionary dimension
-   * selected in query
-   */
-  @Override public int[] getDictionaryKeyIntegerArray() {
-    ++currentRow;
-    return getDictionaryKeyIntegerArray(currentRow);
-  }
-
-  /**
-   * Below method will be used to get the complex type key array
-   *
-   * @return complex type key array
-   */
-  @Override public byte[][] getComplexTypeKeyArray() {
-    return getComplexTypeKeyArray(currentRow);
-  }
-
-  /**
-   * Below method will be used to get the no dictionary key array for all the
-   * no dictionary dimension selected in query
-   *
-   * @return no dictionary key array for all the no dictionary dimension
-   */
-  @Override public byte[][] getNoDictionaryKeyArray() {
-    return getNoDictionaryKeyArray(currentRow);
-  }
-
-  /**
-   * Below method will be used to get the no dictionary key
-   * string array for all the no dictionary dimension selected in query
-   *
-   * @return no dictionary key array for all the no dictionary dimension
-   */
-  @Override public String[] getNoDictionaryKeyStringArray() {
-    return getNoDictionaryKeyStringArray(currentRow);
-  }
-
-  /**
-   * will return the current valid row id
-   *
-   * @return valid row id
-   */
-  @Override public int getCurrenrRowId() {
-    return currentRow;
-  }
-
-  /**
-   * Return the dimension data based on dimension ordinal
-   *
-   * @param dimensionOrdinal dimension ordinal
-   * @return dimension data
-   */
-  @Override public byte[] getDimensionKey(int dimensionOrdinal) {
-    return getDimensionData(dimensionOrdinal, currentRow);
-  }
-
-  /**
-   * Below method will be used to check whether measure value is null or not
-   * for a measure
-   *
-   * @param ordinal measure ordinal
-   * @return is null or not
-   */
-  @Override public boolean isNullMeasureValue(int ordinal) {
-    return isNullMeasureValue(ordinal, currentRow);
-  }
-
-  /**
-   * Below method will be used to get the measure value for measure of long
-   * data type
-   *
-   * @param ordinal measure ordinal
-   * @return long value of measure
-   */
-  @Override public long getLongMeasureValue(int ordinal) {
-    return getLongMeasureValue(ordinal, currentRow);
-  }
-
-  /**
-   * Below method will be used to get the value of measure of double type
-   *
-   * @param ordinal measure ordinal
-   * @return measure value
-   */
-  @Override public double getDoubleMeasureValue(int ordinal) {
-    return getDoubleMeasureValue(ordinal, currentRow);
-  }
-
-  /**
-   * Below method will be used to get the data of big decimal type of a
-   * measure
-   *
-   * @param ordinal measure ordinal
-   * @return measure value
-   */
-  @Override public BigDecimal getBigDecimalMeasureValue(int ordinal) {
-    return getBigDecimalMeasureValue(ordinal, currentRow);
-  }
-
-}


[44/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/converter/ThriftWrapperSchemaConverterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/converter/ThriftWrapperSchemaConverterImpl.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/converter/ThriftWrapperSchemaConverterImpl.java
new file mode 100644
index 0000000..8cf929f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/converter/ThriftWrapperSchemaConverterImpl.java
@@ -0,0 +1,386 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.converter;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.carbon.metadata.schema.SchemaEvolution;
+import org.apache.carbondata.core.carbon.metadata.schema.SchemaEvolutionEntry;
+import org.apache.carbondata.core.carbon.metadata.schema.table.TableInfo;
+import org.apache.carbondata.core.carbon.metadata.schema.table.TableSchema;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
+
+/**
+ * Thrift schema to carbon schema converter and vice versa
+ */
+public class ThriftWrapperSchemaConverterImpl implements SchemaConverter {
+
+  /* (non-Javadoc)
+   * Converts from wrapper to thrift schema evolution entry
+   */
+  @Override
+  public org.apache.carbondata.format.SchemaEvolutionEntry
+       fromWrapperToExternalSchemaEvolutionEntry(SchemaEvolutionEntry wrapperSchemaEvolutionEntry) {
+    org.apache.carbondata.format.SchemaEvolutionEntry thriftSchemaEvolutionEntry =
+        new org.apache.carbondata.format.SchemaEvolutionEntry(
+            wrapperSchemaEvolutionEntry.getTimeStamp());
+
+    List<org.apache.carbondata.format.ColumnSchema> thriftAddedColumns =
+        new ArrayList<org.apache.carbondata.format.ColumnSchema>();
+    for (ColumnSchema wrapperColumnSchema : wrapperSchemaEvolutionEntry.getAdded()) {
+      thriftAddedColumns.add(fromWrapperToExternalColumnSchema(wrapperColumnSchema));
+    }
+
+    List<org.apache.carbondata.format.ColumnSchema> thriftRemovedColumns =
+        new ArrayList<org.apache.carbondata.format.ColumnSchema>();
+    for (ColumnSchema wrapperColumnSchema : wrapperSchemaEvolutionEntry.getRemoved()) {
+      thriftRemovedColumns.add(fromWrapperToExternalColumnSchema(wrapperColumnSchema));
+    }
+
+    thriftSchemaEvolutionEntry.setAdded(thriftAddedColumns);
+    thriftSchemaEvolutionEntry.setRemoved(thriftRemovedColumns);
+    return thriftSchemaEvolutionEntry;
+  }
+
+  /* (non-Javadoc)
+   * converts from wrapper to thrift schema evolution
+   */
+  @Override
+  public org.apache.carbondata.format.SchemaEvolution fromWrapperToExternalSchemaEvolution(
+      SchemaEvolution wrapperSchemaEvolution) {
+
+    List<org.apache.carbondata.format.SchemaEvolutionEntry> thriftSchemaEvolEntryList =
+        new ArrayList<org.apache.carbondata.format.SchemaEvolutionEntry>();
+    for (SchemaEvolutionEntry schemaEvolutionEntry : wrapperSchemaEvolution
+        .getSchemaEvolutionEntryList()) {
+      thriftSchemaEvolEntryList
+          .add(fromWrapperToExternalSchemaEvolutionEntry(schemaEvolutionEntry));
+    }
+    return new org.apache.carbondata.format.SchemaEvolution(thriftSchemaEvolEntryList);
+  }
+
+  /**
+   * converts from wrapper to external encoding
+   *
+   * @param encoder
+   * @return
+   */
+  private org.apache.carbondata.format.Encoding fromWrapperToExternalEncoding(Encoding encoder) {
+
+    if (null == encoder) {
+      return null;
+    }
+
+    switch (encoder) {
+      case DICTIONARY:
+        return org.apache.carbondata.format.Encoding.DICTIONARY;
+      case DELTA:
+        return org.apache.carbondata.format.Encoding.DELTA;
+      case RLE:
+        return org.apache.carbondata.format.Encoding.RLE;
+      case INVERTED_INDEX:
+        return org.apache.carbondata.format.Encoding.INVERTED_INDEX;
+      case BIT_PACKED:
+        return org.apache.carbondata.format.Encoding.BIT_PACKED;
+      case DIRECT_DICTIONARY:
+        return org.apache.carbondata.format.Encoding.DIRECT_DICTIONARY;
+      default:
+        return org.apache.carbondata.format.Encoding.DICTIONARY;
+    }
+  }
+
+  /**
+   * convert from wrapper to external data type
+   *
+   * @param dataType
+   * @return
+   */
+  private org.apache.carbondata.format.DataType fromWrapperToExternalDataType(DataType dataType) {
+
+    if (null == dataType) {
+      return null;
+    }
+    switch (dataType) {
+      case STRING:
+        return org.apache.carbondata.format.DataType.STRING;
+      case INT:
+        return org.apache.carbondata.format.DataType.INT;
+      case SHORT:
+        return org.apache.carbondata.format.DataType.SHORT;
+      case LONG:
+        return org.apache.carbondata.format.DataType.LONG;
+      case DOUBLE:
+        return org.apache.carbondata.format.DataType.DOUBLE;
+      case DECIMAL:
+        return org.apache.carbondata.format.DataType.DECIMAL;
+      case TIMESTAMP:
+        return org.apache.carbondata.format.DataType.TIMESTAMP;
+      case ARRAY:
+        return org.apache.carbondata.format.DataType.ARRAY;
+      case STRUCT:
+        return org.apache.carbondata.format.DataType.STRUCT;
+      default:
+        return org.apache.carbondata.format.DataType.STRING;
+    }
+  }
+
+  /* (non-Javadoc)
+   * convert from wrapper to external column schema
+   */
+  @Override public org.apache.carbondata.format.ColumnSchema fromWrapperToExternalColumnSchema(
+      ColumnSchema wrapperColumnSchema) {
+
+    List<org.apache.carbondata.format.Encoding> encoders =
+        new ArrayList<org.apache.carbondata.format.Encoding>();
+    for (Encoding encoder : wrapperColumnSchema.getEncodingList()) {
+      encoders.add(fromWrapperToExternalEncoding(encoder));
+    }
+    org.apache.carbondata.format.ColumnSchema thriftColumnSchema =
+        new org.apache.carbondata.format.ColumnSchema(
+            fromWrapperToExternalDataType(wrapperColumnSchema.getDataType()),
+            wrapperColumnSchema.getColumnName(), wrapperColumnSchema.getColumnUniqueId(),
+            wrapperColumnSchema.isColumnar(), encoders, wrapperColumnSchema.isDimensionColumn());
+    thriftColumnSchema.setColumn_group_id(wrapperColumnSchema.getColumnGroupId());
+    thriftColumnSchema.setScale(wrapperColumnSchema.getScale());
+    thriftColumnSchema.setPrecision(wrapperColumnSchema.getPrecision());
+    thriftColumnSchema.setNum_child(wrapperColumnSchema.getNumberOfChild());
+    thriftColumnSchema.setDefault_value(wrapperColumnSchema.getDefaultValue());
+    thriftColumnSchema.setColumnProperties(wrapperColumnSchema.getColumnProperties());
+    thriftColumnSchema.setInvisible(wrapperColumnSchema.isInvisible());
+    thriftColumnSchema.setColumnReferenceId(wrapperColumnSchema.getColumnReferenceId());
+    return thriftColumnSchema;
+  }
+
+  /* (non-Javadoc)
+   * convert from wrapper to external tableschema
+   */
+  @Override public org.apache.carbondata.format.TableSchema fromWrapperToExternalTableSchema(
+      TableSchema wrapperTableSchema) {
+
+    List<org.apache.carbondata.format.ColumnSchema> thriftColumnSchema =
+        new ArrayList<org.apache.carbondata.format.ColumnSchema>();
+    for (ColumnSchema wrapperColumnSchema : wrapperTableSchema.getListOfColumns()) {
+      thriftColumnSchema.add(fromWrapperToExternalColumnSchema(wrapperColumnSchema));
+    }
+    org.apache.carbondata.format.SchemaEvolution schemaEvolution =
+        fromWrapperToExternalSchemaEvolution(wrapperTableSchema.getSchemaEvalution());
+    return new org.apache.carbondata.format.TableSchema(wrapperTableSchema.getTableId(),
+        thriftColumnSchema, schemaEvolution);
+  }
+
+  /* (non-Javadoc)
+   * convert from wrapper to external tableinfo
+   */
+  @Override public org.apache.carbondata.format.TableInfo fromWrapperToExternalTableInfo(
+      TableInfo wrapperTableInfo, String dbName, String tableName) {
+
+    org.apache.carbondata.format.TableSchema thriftFactTable =
+        fromWrapperToExternalTableSchema(wrapperTableInfo.getFactTable());
+    List<org.apache.carbondata.format.TableSchema> thriftAggTables =
+        new ArrayList<org.apache.carbondata.format.TableSchema>();
+    for (TableSchema wrapperAggTableSchema : wrapperTableInfo.getAggregateTableList()) {
+      thriftAggTables.add(fromWrapperToExternalTableSchema(wrapperAggTableSchema));
+    }
+    return new org.apache.carbondata.format.TableInfo(thriftFactTable, thriftAggTables);
+  }
+
+  /* (non-Javadoc)
+   * convert from external to wrapper schema evolution entry
+   */
+  @Override public SchemaEvolutionEntry fromExternalToWrapperSchemaEvolutionEntry(
+      org.apache.carbondata.format.SchemaEvolutionEntry externalSchemaEvolutionEntry) {
+
+    SchemaEvolutionEntry wrapperSchemaEvolutionEntry = new SchemaEvolutionEntry();
+    wrapperSchemaEvolutionEntry.setTimeStamp(externalSchemaEvolutionEntry.getTime_stamp());
+
+    List<ColumnSchema> wrapperAddedColumns = new ArrayList<ColumnSchema>();
+    if (null != externalSchemaEvolutionEntry.getAdded()) {
+      for (org.apache.carbondata.format.ColumnSchema externalColumnSchema :
+          externalSchemaEvolutionEntry.getAdded()) {
+        wrapperAddedColumns.add(fromExternalToWrapperColumnSchema(externalColumnSchema));
+      }
+    }
+    List<ColumnSchema> wrapperRemovedColumns = new ArrayList<ColumnSchema>();
+    if (null != externalSchemaEvolutionEntry.getRemoved()) {
+      for (org.apache.carbondata.format.ColumnSchema externalColumnSchema :
+          externalSchemaEvolutionEntry.getRemoved()) {
+        wrapperRemovedColumns.add(fromExternalToWrapperColumnSchema(externalColumnSchema));
+      }
+    }
+
+    wrapperSchemaEvolutionEntry.setAdded(wrapperAddedColumns);
+    wrapperSchemaEvolutionEntry.setRemoved(wrapperRemovedColumns);
+    return wrapperSchemaEvolutionEntry;
+
+  }
+
+  /* (non-Javadoc)
+   * convert from external to wrapper schema evolution
+   */
+  @Override public SchemaEvolution fromExternalToWrapperSchemaEvolution(
+      org.apache.carbondata.format.SchemaEvolution externalSchemaEvolution) {
+    List<SchemaEvolutionEntry> wrapperSchemaEvolEntryList = new ArrayList<SchemaEvolutionEntry>();
+    for (org.apache.carbondata.format.SchemaEvolutionEntry schemaEvolutionEntry :
+        externalSchemaEvolution.getSchema_evolution_history()) {
+      wrapperSchemaEvolEntryList
+          .add(fromExternalToWrapperSchemaEvolutionEntry(schemaEvolutionEntry));
+    }
+    SchemaEvolution wrapperSchemaEvolution = new SchemaEvolution();
+    wrapperSchemaEvolution.setSchemaEvolutionEntryList(wrapperSchemaEvolEntryList);
+    return wrapperSchemaEvolution;
+  }
+
+  /**
+   * convert from external to wrapper encoding
+   *
+   * @param encoder
+   * @return
+   */
+  private Encoding fromExternalToWrapperEncoding(org.apache.carbondata.format.Encoding encoder) {
+    if (null == encoder) {
+      return null;
+    }
+    switch (encoder) {
+      case DICTIONARY:
+        return Encoding.DICTIONARY;
+      case DELTA:
+        return Encoding.DELTA;
+      case RLE:
+        return Encoding.RLE;
+      case INVERTED_INDEX:
+        return Encoding.INVERTED_INDEX;
+      case BIT_PACKED:
+        return Encoding.BIT_PACKED;
+      case DIRECT_DICTIONARY:
+        return Encoding.DIRECT_DICTIONARY;
+      default:
+        return Encoding.DICTIONARY;
+    }
+  }
+
+  /**
+   * convert from external to wrapper data type
+   *
+   * @param dataType
+   * @return
+   */
+  private DataType fromExternalToWrapperDataType(org.apache.carbondata.format.DataType dataType) {
+    if (null == dataType) {
+      return null;
+    }
+    switch (dataType) {
+      case STRING:
+        return DataType.STRING;
+      case INT:
+        return DataType.INT;
+      case SHORT:
+        return DataType.SHORT;
+      case LONG:
+        return DataType.LONG;
+      case DOUBLE:
+        return DataType.DOUBLE;
+      case DECIMAL:
+        return DataType.DECIMAL;
+      case TIMESTAMP:
+        return DataType.TIMESTAMP;
+      case ARRAY:
+        return DataType.ARRAY;
+      case STRUCT:
+        return DataType.STRUCT;
+      default:
+        return DataType.STRING;
+    }
+  }
+
+  /* (non-Javadoc)
+   * convert from external to wrapper columnschema
+   */
+  @Override public ColumnSchema fromExternalToWrapperColumnSchema(
+      org.apache.carbondata.format.ColumnSchema externalColumnSchema) {
+    ColumnSchema wrapperColumnSchema = new ColumnSchema();
+    wrapperColumnSchema.setColumnUniqueId(externalColumnSchema.getColumn_id());
+    wrapperColumnSchema.setColumnName(externalColumnSchema.getColumn_name());
+    wrapperColumnSchema.setColumnar(externalColumnSchema.isColumnar());
+    wrapperColumnSchema.setDataType(fromExternalToWrapperDataType(externalColumnSchema.data_type));
+    wrapperColumnSchema.setDimensionColumn(externalColumnSchema.isDimension());
+    List<Encoding> encoders = new ArrayList<Encoding>();
+    for (org.apache.carbondata.format.Encoding encoder : externalColumnSchema.getEncoders()) {
+      encoders.add(fromExternalToWrapperEncoding(encoder));
+    }
+    wrapperColumnSchema.setEncodingList(encoders);
+    wrapperColumnSchema.setNumberOfChild(externalColumnSchema.getNum_child());
+    wrapperColumnSchema.setPrecision(externalColumnSchema.getPrecision());
+    wrapperColumnSchema.setColumnGroup(externalColumnSchema.getColumn_group_id());
+    wrapperColumnSchema.setScale(externalColumnSchema.getScale());
+    wrapperColumnSchema.setDefaultValue(externalColumnSchema.getDefault_value());
+    wrapperColumnSchema.setAggregateFunction(externalColumnSchema.getAggregate_function());
+    wrapperColumnSchema.setColumnProperties(externalColumnSchema.getColumnProperties());
+    wrapperColumnSchema.setInvisible(externalColumnSchema.isInvisible());
+    wrapperColumnSchema.setColumnReferenceId(externalColumnSchema.getColumnReferenceId());
+    return wrapperColumnSchema;
+  }
+
+  /* (non-Javadoc)
+   * convert from external to wrapper tableschema
+   */
+  @Override public TableSchema fromExternalToWrapperTableSchema(
+      org.apache.carbondata.format.TableSchema externalTableSchema, String tableName) {
+    TableSchema wrapperTableSchema = new TableSchema();
+    wrapperTableSchema.setTableId(externalTableSchema.getTable_id());
+    wrapperTableSchema.setTableName(tableName);
+    List<ColumnSchema> listOfColumns = new ArrayList<ColumnSchema>();
+    for (org.apache.carbondata.format.ColumnSchema externalColumnSchema : externalTableSchema
+        .getTable_columns()) {
+      listOfColumns.add(fromExternalToWrapperColumnSchema(externalColumnSchema));
+    }
+    wrapperTableSchema.setListOfColumns(listOfColumns);
+    wrapperTableSchema.setSchemaEvalution(
+        fromExternalToWrapperSchemaEvolution(externalTableSchema.getSchema_evolution()));
+    return wrapperTableSchema;
+  }
+
+  /* (non-Javadoc)
+   * convert from external to wrapper tableinfo
+   */
+  @Override public TableInfo fromExternalToWrapperTableInfo(
+      org.apache.carbondata.format.TableInfo externalTableInfo, String dbName, String tableName,
+      String storePath) {
+    TableInfo wrapperTableInfo = new TableInfo();
+    wrapperTableInfo.setLastUpdatedTime(
+        externalTableInfo.getFact_table().getSchema_evolution().getSchema_evolution_history().get(0)
+            .getTime_stamp());
+    wrapperTableInfo.setDatabaseName(dbName);
+    wrapperTableInfo.setTableUniqueName(dbName + "_" + tableName);
+    wrapperTableInfo.setStorePath(storePath);
+    wrapperTableInfo.setFactTable(
+        fromExternalToWrapperTableSchema(externalTableInfo.getFact_table(), tableName));
+    List<TableSchema> aggTablesList = new ArrayList<TableSchema>();
+    int index = 0;
+    for (org.apache.carbondata.format.TableSchema aggTable : externalTableInfo
+        .getAggregate_table_list()) {
+      aggTablesList.add(fromExternalToWrapperTableSchema(aggTable, "agg_table_" + index));
+      index++;
+    }
+    // attach the converted aggregate schemas; the list above was otherwise
+    // unused (assumes TableInfo has a setter matching getAggregateTableList)
+    wrapperTableInfo.setAggregateTableList(aggTablesList);
+    return wrapperTableInfo;
+  }
+
+}
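
A hedged round-trip sketch for the converter above: fields copied in both directions should survive wrapper -> Thrift -> wrapper conversion. It assumes this commit's wrapper classes and the generated org.apache.carbondata.format Thrift types are on the classpath:

import java.util.Arrays;

import org.apache.carbondata.core.carbon.metadata.converter.SchemaConverter;
import org.apache.carbondata.core.carbon.metadata.converter.ThriftWrapperSchemaConverterImpl;
import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;

// Sketch only: round-trips one column schema through the converter above.
public class ConverterRoundTripSketch {
  public static void main(String[] args) {
    ColumnSchema wrapper = new ColumnSchema();
    wrapper.setColumnName("id");
    wrapper.setDataType(DataType.INT);
    wrapper.setEncodingList(Arrays.asList(Encoding.DICTIONARY));

    SchemaConverter converter = new ThriftWrapperSchemaConverterImpl();
    org.apache.carbondata.format.ColumnSchema thrift =
        converter.fromWrapperToExternalColumnSchema(wrapper);
    ColumnSchema back = converter.fromExternalToWrapperColumnSchema(thrift);
    // back.getColumnName() is again "id" and back.getDataType() is DataType.INT
  }
}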

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/datatype/ConvertedType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/datatype/ConvertedType.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/datatype/ConvertedType.java
new file mode 100644
index 0000000..7b99ec2
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/datatype/ConvertedType.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.datatype;
+
+public enum ConvertedType {
+
+  /**
+   * a BYTE_ARRAY actually contains UTF8 encoded chars
+   */
+  UTF8,
+  /**
+   * a map is converted as an optional field containing a repeated key/value pair
+   */
+  MAP,
+  /**
+   * a key/value pair is converted into a group of two fields
+   */
+  MAP_KEY_VALUE,
+  /**
+   * a list is converted into an optional field containing a repeated field for its
+   * values
+   */
+  LIST,
+  /**
+   * an enum is converted into a binary field
+   */
+  ENUM,
+  /**
+   * A decimal value.
+   * This may be used to annotate binary or fixed primitive types. The
+   * underlying byte array stores the unscaled value encoded as two's
+   * complement using big-endian byte order (the most significant byte is the
+   * zeroth element). The value of the decimal is the value * 10^{-scale}.
+   * This must be accompanied by a (maximum) precision and a scale in the
+   * SchemaElement. The precision specifies the number of digits in the decimal
+   * and the scale stores the location of the decimal point. For example 1.23
+   * would have precision 3 (3 total digits) and scale 2 (the decimal point is
+   * 2 digits over).
+   */
+  DECIMAL,
+  /**
+   * A Date
+   * Stored as days since Unix epoch, encoded as the INT32 physical type.
+   */
+  DATE,
+  /**
+   * A time
+   * The total number of milliseconds since midnight.  The value is stored
+   * as an INT32 physical type.
+   */
+  TIME_MILLIS,
+  /**
+   * A date/time combination
+   * Date and time recorded as milliseconds since the Unix epoch.  Recorded as
+   * a physical type of INT64.
+   */
+  TIMESTAMP_MILLIS,
+
+  RESERVED,
+  /**
+   * An unsigned integer value.
+   * The number describes the maximum number of meaningful data bits in
+   * the stored value. 8, 16 and 32 bit values are stored using the
+   * INT32 physical type.  64 bit values are stored using the INT64
+   * physical type.
+   */
+  UINT_8,
+  UINT_16,
+  UINT_32,
+  UINT_64,
+  /**
+   * A signed integer value.
+   * The number describes the maximum number of meainful data bits in
+   * the stored value. 8, 16 and 32 bit values are stored using the
+   * INT32 physical type.  64 bit values are stored using the INT64
+   * physical type.
+   */
+  INT_8,
+  INT_16,
+  INT_32,
+  INT_64,
+  /**
+   * An embedded JSON document
+   * A JSON document embedded within a single UTF8 column.
+   */
+  JSON,
+
+  /**
+   * An embedded BSON document
+   * A BSON document embedded within a single BINARY column.
+   */
+  BSON,
+
+  /**
+   * An interval of time
+   * This type annotates data stored as a FIXED_LEN_BYTE_ARRAY of length 12
+   * This data is composed of three separate little endian unsigned
+   * integers.  Each stores a component of a duration of time.  The first
+   * integer identifies the number of months associated with the duration,
+   * the second identifies the number of days associated with the duration
+   * and the third identifies the number of milliseconds associated with
+   * the provided duration.  This duration of time is independent of any
+   * particular timezone or date.
+   */
+  INTERVAL;
+}
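
The DECIMAL layout documented above (big-endian two's-complement unscaled value plus a scale, with value = unscaled * 10^(-scale)) is exactly what java.math.BigDecimal can reconstruct. A small worked sketch:

import java.math.BigDecimal;
import java.math.BigInteger;

// Decodes the DECIMAL layout documented above from its raw bytes.
public class DecimalDecodeSketch {
  public static void main(String[] args) {
    byte[] unscaled = { 0x7B }; // 123 as big-endian two's complement
    int scale = 2;              // decimal point two digits over
    BigDecimal value = new BigDecimal(new BigInteger(unscaled), scale);
    System.out.println(value);  // prints 1.23 (precision 3, scale 2)
  }
}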

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/datatype/DataType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/datatype/DataType.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/datatype/DataType.java
new file mode 100644
index 0000000..ac7ef32
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/datatype/DataType.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.carbon.metadata.datatype;
+
+public enum DataType {
+
+  STRING(0),
+  DATE(1),
+  TIMESTAMP(2),
+  BOOLEAN(1),
+  SHORT(2),
+  INT(3),
+  FLOAT(4),
+  LONG(5),
+  DOUBLE(6),
+  NULL(7),
+  DECIMAL(8),
+  ARRAY(9),
+  STRUCT(10),
+  MAP(11);
+
+  private int presedenceOrder;
+
+  DataType(int value) {
+    this.presedenceOrder = value;
+  }
+
+  public int getPresedenceOrder() {
+    return presedenceOrder;
+  }
+}
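
The diff itself does not show how presedenceOrder is consumed; one plausible use is resolving the wider of two types, sketched below. Note that several entries share a value in this revision (for example DATE and BOOLEAN are both 1), so ties are possible:

import org.apache.carbondata.core.carbon.metadata.datatype.DataType;

// Hypothetical helper: picks the higher-precedence of two data types, one
// plausible consumer of getPresedenceOrder(); not shown in this diff.
public class PrecedenceSketch {
  static DataType wider(DataType a, DataType b) {
    return a.getPresedenceOrder() >= b.getPresedenceOrder() ? a : b;
  }

  public static void main(String[] args) {
    System.out.println(wider(DataType.INT, DataType.DOUBLE)); // DOUBLE (3 vs 6)
  }
}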

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/encoder/Encoding.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/encoder/Encoding.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/encoder/Encoding.java
new file mode 100644
index 0000000..416f8e7
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/encoder/Encoding.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.encoder;
+
+/**
+ * Encoding type supported in carbon
+ */
+public enum Encoding {
+  DICTIONARY,
+  DELTA,
+  RLE,
+  INVERTED_INDEX,
+  BIT_PACKED,
+  DIRECT_DICTIONARY;
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/index/BlockIndexInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/index/BlockIndexInfo.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/index/BlockIndexInfo.java
new file mode 100644
index 0000000..9bdbc18
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/index/BlockIndexInfo.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.index;
+
+import org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletIndex;
+
+/**
+ * Below class will be used to hold the information
+ * about block index
+ */
+public class BlockIndexInfo {
+
+  /**
+   * total number of rows present in the file
+   */
+  private long numberOfRows;
+
+  /**
+   * file name
+   */
+  private String fileName;
+
+  /**
+   * offset of metadata in data file
+   */
+  private long offset;
+
+  /**
+   * to store min max and start and end key
+   */
+  private BlockletIndex blockletIndex;
+
+  /**
+   * Constructor
+   *
+   * @param numberOfRows  number of rows
+   * @param fileName      full qualified name
+   * @param offset        offset of the metadata in data file
+   * @param blockletIndex blocklet index
+   */
+  public BlockIndexInfo(long numberOfRows, String fileName, long offset,
+      BlockletIndex blockletIndex) {
+    this.numberOfRows = numberOfRows;
+    this.fileName = fileName;
+    this.offset = offset;
+    this.blockletIndex = blockletIndex;
+  }
+
+  /**
+   * @return the numberOfRows
+   */
+  public long getNumberOfRows() {
+    return numberOfRows;
+  }
+
+  /**
+   * @return the fileName
+   */
+  public String getFileName() {
+    return fileName;
+  }
+
+  /**
+   * @return the offset
+   */
+  public long getOffset() {
+    return offset;
+  }
+
+  /**
+   * @return the blockletIndex
+   */
+  public BlockletIndex getBlockletIndex() {
+    return blockletIndex;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/SchemaEvolution.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/SchemaEvolution.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/SchemaEvolution.java
new file mode 100644
index 0000000..9bbcce3
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/SchemaEvolution.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.schema;
+
+import java.io.Serializable;
+import java.util.List;
+
+/**
+ * Persisting schema restructuring information;
+ */
+public class SchemaEvolution implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = 8186224567517679868L;
+
+  /**
+   * list of schema evolution entry
+   */
+  private List<SchemaEvolutionEntry> schemaEvolutionEntryList;
+
+  /**
+   * @return the schemaEvolutionEntryList
+   */
+  public List<SchemaEvolutionEntry> getSchemaEvolutionEntryList() {
+    return schemaEvolutionEntryList;
+  }
+
+  /**
+   * @param schemaEvolutionEntryList the schemaEvolutionEntryList to set
+   */
+  public void setSchemaEvolutionEntryList(List<SchemaEvolutionEntry> schemaEvolutionEntryList) {
+    this.schemaEvolutionEntryList = schemaEvolutionEntryList;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/SchemaEvolutionEntry.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/SchemaEvolutionEntry.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/SchemaEvolutionEntry.java
new file mode 100644
index 0000000..ec5fb96
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/SchemaEvolutionEntry.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.schema;
+
+import java.io.Serializable;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
+
+/**
+ * Stores the information about the schema evolution
+ */
+public class SchemaEvolutionEntry implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = -7619477063676325276L;
+
+  /**
+   * time stamp of restructuring
+   */
+  private long timeStamp;
+
+  /**
+   * new column added in restructuring
+   */
+  private List<ColumnSchema> added;
+
+  /**
+   * column removed in restructuring
+   */
+  private List<ColumnSchema> removed;
+
+  /**
+   * @return the timeStamp
+   */
+  public long getTimeStamp() {
+    return timeStamp;
+  }
+
+  /**
+   * @param timeStamp the timeStamp to set
+   */
+  public void setTimeStamp(long timeStamp) {
+    this.timeStamp = timeStamp;
+  }
+
+  /**
+   * @return the added
+   */
+  public List<ColumnSchema> getAdded() {
+    return added;
+  }
+
+  /**
+   * @param added the added to set
+   */
+  public void setAdded(List<ColumnSchema> added) {
+    this.added = added;
+  }
+
+  /**
+   * @return the removed
+   */
+  public List<ColumnSchema> getRemoved() {
+    return removed;
+  }
+
+  /**
+   * @param removed the removed to set
+   */
+  public void setRemoved(List<ColumnSchema> removed) {
+    this.removed = removed;
+  }
+
+}
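
A brief sketch of how the two classes above fit together: one SchemaEvolutionEntry records the columns added and removed at a timestamp, and SchemaEvolution keeps the ordered history. The setter calls mirror the accessors in this diff:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.carbondata.core.carbon.metadata.schema.SchemaEvolution;
import org.apache.carbondata.core.carbon.metadata.schema.SchemaEvolutionEntry;
import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;

// Sketch only: records one restructuring step (a column added now, nothing
// removed) and appends it to a table's evolution history.
public class SchemaEvolutionSketch {
  public static void main(String[] args) {
    ColumnSchema newColumn = new ColumnSchema(); // field setters omitted here
    SchemaEvolutionEntry entry = new SchemaEvolutionEntry();
    entry.setTimeStamp(System.currentTimeMillis());
    entry.setAdded(Arrays.asList(newColumn));
    entry.setRemoved(new ArrayList<ColumnSchema>());

    SchemaEvolution history = new SchemaEvolution();
    List<SchemaEvolutionEntry> entries = new ArrayList<SchemaEvolutionEntry>();
    entries.add(entry);
    history.setSchemaEvolutionEntryList(entries);
  }
}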

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/CarbonTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/CarbonTable.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/CarbonTable.java
new file mode 100644
index 0000000..b99b8d3
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/CarbonTable.java
@@ -0,0 +1,393 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.carbon.metadata.schema.table;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
+
+/**
+ * Mapping class for the actual Carbon table
+ */
+public class CarbonTable implements Serializable {
+
+  /**
+   * serialization id
+   */
+  private static final long serialVersionUID = 8696507171227156445L;
+
+  /**
+   * Absolute table identifier
+   */
+  private AbsoluteTableIdentifier absoluteTableIdentifier;
+
+  /**
+   * map of table name to its dimensions list
+   */
+  private Map<String, List<CarbonDimension>> tableDimensionsMap;
+
+  /**
+   * map of table name to its measures list
+   */
+  private Map<String, List<CarbonMeasure>> tableMeasuresMap;
+
+  /**
+   * tableUniqueName
+   */
+  private String tableUniqueName;
+
+  /**
+   * Aggregate table names
+   */
+  private List<String> aggregateTablesName;
+
+  /**
+   * metadata file path (check whether it is really required)
+   */
+  private String metaDataFilepath;
+
+  /**
+   * last updated time
+   */
+  private long tableLastUpdatedTime;
+
+  public CarbonTable() {
+    this.tableDimensionsMap = new HashMap<String, List<CarbonDimension>>();
+    this.tableMeasuresMap = new HashMap<String, List<CarbonMeasure>>();
+    this.aggregateTablesName = new ArrayList<String>();
+  }
+
+  /**
+   * @param tableInfo
+   */
+  public void loadCarbonTable(TableInfo tableInfo) {
+    this.tableLastUpdatedTime = tableInfo.getLastUpdatedTime();
+    this.tableUniqueName = tableInfo.getTableUniqueName();
+    this.metaDataFilepath = tableInfo.getMetaDataFilepath();
+    //setting unique table identifier
+    CarbonTableIdentifier carbontableIdentifier =
+        new CarbonTableIdentifier(tableInfo.getDatabaseName(),
+            tableInfo.getFactTable().getTableName(), tableInfo.getFactTable().getTableId());
+    this.absoluteTableIdentifier =
+        new AbsoluteTableIdentifier(tableInfo.getStorePath(), carbontableIdentifier);
+
+    fillDimensionsAndMeasuresForTables(tableInfo.getFactTable());
+    List<TableSchema> aggregateTableList = tableInfo.getAggregateTableList();
+    for (TableSchema aggTable : aggregateTableList) {
+      this.aggregateTablesName.add(aggTable.getTableName());
+      fillDimensionsAndMeasuresForTables(aggTable);
+    }
+  }
+
+  /**
+   * Fill dimensions and measures for carbon table
+   *
+   * @param tableSchema
+   */
+  private void fillDimensionsAndMeasuresForTables(TableSchema tableSchema) {
+    List<CarbonDimension> dimensions = new ArrayList<CarbonDimension>();
+    List<CarbonMeasure> measures = new ArrayList<CarbonMeasure>();
+    this.tableDimensionsMap.put(tableSchema.getTableName(), dimensions);
+    this.tableMeasuresMap.put(tableSchema.getTableName(), measures);
+    int dimensionOrdinal = 0;
+    int measureOrdinal = 0;
+    int keyOrdinal = 0;
+    int columnGroupOrdinal = -1;
+    int previousColumnGroupId = -1;
+    List<ColumnSchema> listOfColumns = tableSchema.getListOfColumns();
+    int complexTypeOrdinal = -1;
+    for (int i = 0; i < listOfColumns.size(); i++) {
+      ColumnSchema columnSchema = listOfColumns.get(i);
+      if (columnSchema.isDimensionColumn()) {
+        if (columnSchema.getNumberOfChild() > 0) {
+          CarbonDimension complexDimension =
+              new CarbonDimension(columnSchema, dimensionOrdinal++, -1, -1, ++complexTypeOrdinal);
+          complexDimension.initializeChildDimensionsList(columnSchema.getNumberOfChild());
+          dimensions.add(complexDimension);
+          dimensionOrdinal =
+              readAllComplexTypeChildrens(dimensionOrdinal, columnSchema.getNumberOfChild(),
+                  listOfColumns, complexDimension);
+          i = dimensionOrdinal - 1;
+          complexTypeOrdinal = assignComplexOrdinal(complexDimension, complexTypeOrdinal);
+        } else {
+          if (!columnSchema.getEncodingList().contains(Encoding.DICTIONARY)) {
+            dimensions.add(new CarbonDimension(columnSchema, dimensionOrdinal++, -1, -1, -1));
+          } else if (columnSchema.getEncodingList().contains(Encoding.DICTIONARY)
+              && columnSchema.getColumnGroupId() == -1) {
+            dimensions
+                .add(new CarbonDimension(columnSchema, dimensionOrdinal++, keyOrdinal++, -1, -1));
+          } else {
+            columnGroupOrdinal =
+                previousColumnGroupId == columnSchema.getColumnGroupId() ? ++columnGroupOrdinal : 0;
+            previousColumnGroupId = columnSchema.getColumnGroupId();
+            dimensions.add(new CarbonDimension(columnSchema, dimensionOrdinal++, keyOrdinal++,
+                columnGroupOrdinal, -1));
+
+          }
+        }
+      } else {
+        measures.add(new CarbonMeasure(columnSchema, measureOrdinal++));
+      }
+    }
+  }
+
+  /**
+   * Read all primitive/complex children and set them as the child dimensions of the parent
+   * dimension
+   *
+   * @param dimensionOrdinal
+   * @param childCount
+   * @param listOfColumns
+   * @param parentDimension
+   * @return the updated dimension ordinal after reading all children
+   */
+  private int readAllComplexTypeChildrens(int dimensionOrdinal, int childCount,
+      List<ColumnSchema> listOfColumns, CarbonDimension parentDimension) {
+    for (int i = 0; i < childCount; i++) {
+      ColumnSchema columnSchema = listOfColumns.get(dimensionOrdinal);
+      if (columnSchema.isDimensionColumn()) {
+        if (columnSchema.getNumberOfChild() > 0) {
+          CarbonDimension complexDimension =
+              new CarbonDimension(columnSchema, dimensionOrdinal++, -1, -1, -1);
+          complexDimension.initializeChildDimensionsList(columnSchema.getNumberOfChild());
+          parentDimension.getListOfChildDimensions().add(complexDimension);
+          dimensionOrdinal =
+              readAllComplexTypeChildrens(dimensionOrdinal, columnSchema.getNumberOfChild(),
+                  listOfColumns, complexDimension);
+        } else {
+          parentDimension.getListOfChildDimensions()
+              .add(new CarbonDimension(columnSchema, dimensionOrdinal++, -1, -1, -1));
+        }
+      }
+    }
+    return dimensionOrdinal;
+  }
+
+  /**
+   * Recursively assign complex type ordinals to the children of the given parent dimension
+   */
+  private int assignComplexOrdinal(CarbonDimension parentDimension, int complexDimensionOrdianl) {
+    for (int i = 0; i < parentDimension.getNumberOfChild(); i++) {
+      CarbonDimension dimension = parentDimension.getListOfChildDimensions().get(i);
+      if (dimension.getNumberOfChild() > 0) {
+        dimension.setComplexTypeOridnal(++complexDimensionOrdianl);
+        complexDimensionOrdianl = assignComplexOrdinal(dimension, complexDimensionOrdianl);
+      } else {
+        parentDimension.getListOfChildDimensions().get(i)
+            .setComplexTypeOridnal(++complexDimensionOrdianl);
+      }
+    }
+    return complexDimensionOrdianl;
+  }
+
+  /**
+   * @return the databaseName
+   */
+  public String getDatabaseName() {
+    return absoluteTableIdentifier.getCarbonTableIdentifier().getDatabaseName();
+  }
+
+  /**
+   * @return the tableName
+   */
+  public String getFactTableName() {
+    return absoluteTableIdentifier.getCarbonTableIdentifier().getTableName();
+  }
+
+  /**
+   * @return the tableUniqueName
+   */
+  public String getTableUniqueName() {
+    return tableUniqueName;
+  }
+
+  /**
+   * @return the metaDataFilepath
+   */
+  public String getMetaDataFilepath() {
+    return metaDataFilepath;
+  }
+
+  /**
+   * @return the store path
+   */
+  public String getStorePath() {
+    return absoluteTableIdentifier.getStorePath();
+  }
+
+  /**
+   * @return list of aggregate TablesName
+   */
+  public List<String> getAggregateTablesName() {
+    return aggregateTablesName;
+  }
+
+  /**
+   * @return the tableLastUpdatedTime
+   */
+  public long getTableLastUpdatedTime() {
+    return tableLastUpdatedTime;
+  }
+
+  /**
+   * to get the number of dimensions present in the table
+   *
+   * @param tableName
+   * @return number of dimensions present in the table
+   */
+  public int getNumberOfDimensions(String tableName) {
+    return tableDimensionsMap.get(tableName).size();
+  }
+
+  /**
+   * to get the number of measures present in the table
+   *
+   * @param tableName
+   * @return number of measures present in the table
+   */
+  public int getNumberOfMeasures(String tableName) {
+    return tableMeasuresMap.get(tableName).size();
+  }
+
+  /**
+   * to get all dimensions of a table
+   *
+   * @param tableName
+   * @return all dimensions of the table
+   */
+  public List<CarbonDimension> getDimensionByTableName(String tableName) {
+    return tableDimensionsMap.get(tableName);
+  }
+
+  /**
+   * to get all measures of a table
+   *
+   * @param tableName
+   * @return all measures of the table
+   */
+  public List<CarbonMeasure> getMeasureByTableName(String tableName) {
+    return tableMeasuresMap.get(tableName);
+  }
+
+  /**
+   * to get particular measure from a table
+   *
+   * @param tableName
+   * @param columnName
+   * @return the measure if found, otherwise null
+   */
+  public CarbonMeasure getMeasureByName(String tableName, String columnName) {
+    List<CarbonMeasure> measureList = tableMeasuresMap.get(tableName);
+    for (CarbonMeasure measure : measureList) {
+      if (measure.getColName().equalsIgnoreCase(columnName)) {
+        return measure;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * to get particular dimension from a table
+   *
+   * @param tableName
+   * @param columnName
+   * @return the dimension if found, otherwise null
+   */
+  public CarbonDimension getDimensionByName(String tableName, String columnName) {
+    List<CarbonDimension> dimList = tableDimensionsMap.get(tableName);
+    for (CarbonDimension dim : dimList) {
+      if (dim.getColName().equalsIgnoreCase(columnName)) {
+        return dim;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * gets all children dimension for complex type
+   *
+   * @param dimName
+   * @return list of child dimensions
+   */
+  public List<CarbonDimension> getChildren(String dimName) {
+    for (List<CarbonDimension> list : tableDimensionsMap.values()) {
+      List<CarbonDimension> childDims = getChildren(dimName, list);
+      if (childDims != null) {
+        return childDims;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * returns level 2 or more child dimensions
+   *
+   * @param dimName
+   * @param dimensions
+   * @return list of child dimensions
+   */
+  public List<CarbonDimension> getChildren(String dimName, List<CarbonDimension> dimensions) {
+    for (CarbonDimension carbonDimension : dimensions) {
+      if (carbonDimension.getColName().equals(dimName)) {
+        return carbonDimension.getListOfChildDimensions();
+      } else if (null != carbonDimension.getListOfChildDimensions()
+          && carbonDimension.getListOfChildDimensions().size() > 0) {
+        List<CarbonDimension> childDims =
+            getChildren(dimName, carbonDimension.getListOfChildDimensions());
+        if (childDims != null) {
+          return childDims;
+        }
+      }
+    }
+    return null;
+  }
+
+  /**
+   * @return absolute table identifier
+   */
+  public AbsoluteTableIdentifier getAbsoluteTableIdentifier() {
+    return absoluteTableIdentifier;
+  }
+
+  /**
+   * @return carbon table identifier
+   */
+  public CarbonTableIdentifier getCarbonTableIdentifier() {
+    return absoluteTableIdentifier.getCarbonTableIdentifier();
+  }
+
+  /**
+   * gets partition count for this table
+   * TODO: to be implemented while supporting partitioning
+   */
+  public int getPartitionCount() {
+    return 1;
+  }
+}
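
As a sketch of the loading flow above: a TableInfo carrying one TableSchema is handed
to loadCarbonTable, which splits the columns into per-table dimension and measure lists
and assigns ordinals (dictionary dimensions additionally get a key ordinal). All names
and values here are invented, and the STRING/DOUBLE members of the DataType enum are
assumptions, not confirmed by this commit:

import java.util.Arrays;

import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
import org.apache.carbondata.core.carbon.metadata.schema.table.CarbonTable;
import org.apache.carbondata.core.carbon.metadata.schema.table.TableInfo;
import org.apache.carbondata.core.carbon.metadata.schema.table.TableSchema;
import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;

public class CarbonTableLoadExample {
  public static void main(String[] args) {
    // Hypothetical dictionary dimension: gets keyOrdinal 0 in fillDimensionsAndMeasuresForTables.
    ColumnSchema id = new ColumnSchema();
    id.setColumnName("id");
    id.setColumnUniqueId("1");
    id.setDimensionColumn(true);
    id.setDataType(DataType.STRING);
    id.setEncodingList(Arrays.asList(Encoding.DICTIONARY));

    // Hypothetical measure: gets measureOrdinal 0.
    ColumnSchema amount = new ColumnSchema();
    amount.setColumnName("amount");
    amount.setColumnUniqueId("2");
    amount.setDimensionColumn(false);
    amount.setDataType(DataType.DOUBLE);

    TableSchema fact = new TableSchema();
    fact.setTableId("t1");
    fact.setTableName("sales");
    fact.setListOfColumns(Arrays.asList(id, amount));

    TableInfo info = new TableInfo();
    info.setDatabaseName("default");
    info.setTableUniqueName("default_sales");
    info.setFactTable(fact);
    info.setStorePath("/tmp/carbon.store");

    CarbonTable table = new CarbonTable();
    table.loadCarbonTable(info);
    System.out.println(table.getNumberOfDimensions("sales")); // 1
    System.out.println(table.getNumberOfMeasures("sales"));   // 1
  }
}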

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/TableInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/TableInfo.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/TableInfo.java
new file mode 100644
index 0000000..888b898
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/TableInfo.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.schema.table;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+/**
+ * Stores the information about the table.
+ * It stores the fact table as well as the aggregate tables present in the schema
+ */
+public class TableInfo implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = -5034287968314105193L;
+
+  /**
+   * name of the database
+   */
+  private String databaseName;
+
+  /**
+   * table name to group fact table and aggregate table
+   */
+  private String tableUniqueName;
+
+  /**
+   * fact table information
+   */
+  private TableSchema factTable;
+
+  /**
+   * list of aggregate table
+   */
+  private List<TableSchema> aggregateTableList;
+
+  /**
+   * last updated time to update the table if any changes
+   */
+  private long lastUpdatedTime;
+
+  /**
+   * metadata file path (check whether it is really required)
+   */
+  private String metaDataFilepath;
+
+  /**
+   * store location
+   */
+  private String storePath;
+
+  public TableInfo() {
+    aggregateTableList = new ArrayList<TableSchema>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+  }
+
+  /**
+   * @return the factTable
+   */
+  public TableSchema getFactTable() {
+    return factTable;
+  }
+
+  /**
+   * @param factTable the factTable to set
+   */
+  public void setFactTable(TableSchema factTable) {
+    this.factTable = factTable;
+  }
+
+  /**
+   * @return the aggregateTableList
+   */
+  public List<TableSchema> getAggregateTableList() {
+    return aggregateTableList;
+  }
+
+  /**
+   * @param aggregateTableList the aggregateTableList to set
+   */
+  public void setAggregateTableList(List<TableSchema> aggregateTableList) {
+    this.aggregateTableList = aggregateTableList;
+  }
+
+  /**
+   * @return the databaseName
+   */
+  public String getDatabaseName() {
+    return databaseName;
+  }
+
+  /**
+   * @param databaseName the databaseName to set
+   */
+  public void setDatabaseName(String databaseName) {
+    this.databaseName = databaseName;
+  }
+
+  public TableSchema getTableSchemaByName(String tableName) {
+    if (factTable.getTableName().equalsIgnoreCase(tableName)) {
+      return factTable;
+    }
+    for (TableSchema aggregatTableSchema : aggregateTableList) {
+      if (aggregatTableSchema.getTableName().equals(tableName)) {
+        return aggregatTableSchema;
+      }
+    }
+    return null;
+  }
+
+  public TableSchema getTableSchemaByTableId(String tableId) {
+    if (factTable.getTableId().equals(tableId)) {
+      return factTable;
+    }
+    for (TableSchema aggregatTableSchema : aggregateTableList) {
+      if (aggregatTableSchema.getTableId().equals(tableId)) {
+        return aggregatTableSchema;
+      }
+    }
+    return null;
+  }
+
+  public int getNumberOfAggregateTables() {
+    return aggregateTableList.size();
+  }
+
+  /**
+   * @return the tableUniqueName
+   */
+  public String getTableUniqueName() {
+    return tableUniqueName;
+  }
+
+  /**
+   * @param tableUniqueName the tableUniqueName to set
+   */
+  public void setTableUniqueName(String tableUniqueName) {
+    this.tableUniqueName = tableUniqueName;
+  }
+
+  /**
+   * @return the lastUpdatedTime
+   */
+  public long getLastUpdatedTime() {
+    return lastUpdatedTime;
+  }
+
+  /**
+   * @param lastUpdatedTime the lastUpdatedTime to set
+   */
+  public void setLastUpdatedTime(long lastUpdatedTime) {
+    this.lastUpdatedTime = lastUpdatedTime;
+  }
+
+  /**
+   * @return the metaDataFilepath
+   */
+  public String getMetaDataFilepath() {
+    return metaDataFilepath;
+  }
+
+  /**
+   * @param metaDataFilepath
+   */
+  public void setMetaDataFilepath(String metaDataFilepath) {
+    this.metaDataFilepath = metaDataFilepath;
+  }
+
+  public String getStorePath() {
+    return storePath;
+  }
+
+  public void setStorePath(String storePath) {
+    this.storePath = storePath;
+  }
+
+  /**
+   * to generate the hash code
+   */
+  @Override public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((databaseName == null) ? 0 : databaseName.hashCode());
+    result = prime * result + ((tableUniqueName == null) ? 0 : tableUniqueName.hashCode());
+    return result;
+  }
+
+  /**
+   * Overridden equals method
+   */
+  @Override public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (!(obj instanceof TableInfo)) {
+      return false;
+    }
+    TableInfo other = (TableInfo) obj;
+    if (databaseName == null) {
+      if (other.databaseName != null) {
+        return false;
+      }
+    } else if (!databaseName.equals(other.databaseName)) {
+      return false;
+    }
+
+    if (tableUniqueName == null) {
+      if (other.tableUniqueName != null) {
+        return false;
+      }
+    } else if (!tableUniqueName.equals(other.tableUniqueName)) {
+      return false;
+    }
+    return true;
+  }
+}
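
Reusing the hypothetical info object from the CarbonTable sketch above, the lookup
helpers behave as follows; note that the fact-table match ignores case while the
aggregate-table match is case-sensitive:

TableSchema byName = info.getTableSchemaByName("SALES"); // matches the fact table, ignoring case
TableSchema byId   = info.getTableSchemaByTableId("t1"); // matches on the table id
int aggregates     = info.getNumberOfAggregateTables();  // 0: no aggregate tables registered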

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/TableSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/TableSchema.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/TableSchema.java
new file mode 100644
index 0000000..ded5635
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/TableSchema.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.schema.table;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.metadata.schema.SchemaEvolution;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+/**
+ * Persisting the table information
+ */
+public class TableSchema implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = -1928614587722507026L;
+
+  /**
+   * table id
+   */
+  private String tableId;
+
+  /**
+   * table Name
+   */
+  private String tableName;
+
+  /**
+   * Columns in the table
+   */
+  private List<ColumnSchema> listOfColumns;
+
+  /**
+   * History of schema evolution of this table
+   */
+  private SchemaEvolution schemaEvalution;
+
+  public TableSchema() {
+    this.listOfColumns = new ArrayList<ColumnSchema>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+  }
+
+  /**
+   * @return the tableId
+   */
+  public String getTableId() {
+    return tableId;
+  }
+
+  /**
+   * @param tableId the tableId to set
+   */
+  public void setTableId(String tableId) {
+    this.tableId = tableId;
+  }
+
+  /**
+   * @return the listOfColumns
+   */
+  public List<ColumnSchema> getListOfColumns() {
+    return listOfColumns;
+  }
+
+  /**
+   * @param listOfColumns the listOfColumns to set
+   */
+  public void setListOfColumns(List<ColumnSchema> listOfColumns) {
+    this.listOfColumns = listOfColumns;
+  }
+
+  /**
+   * @return the schemaEvalution
+   */
+  public SchemaEvolution getSchemaEvalution() {
+    return schemaEvalution;
+  }
+
+  /**
+   * @param schemaEvalution the schemaEvalution to set
+   */
+  public void setSchemaEvalution(SchemaEvolution schemaEvalution) {
+    this.schemaEvalution = schemaEvalution;
+  }
+
+  /**
+   * @return the tableName
+   */
+  public String getTableName() {
+    return tableName;
+  }
+
+  /**
+   * @param tableName the tableName to set
+   */
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+  /**
+   * to get the column schema present in the table by name
+   *
+   * @param columnName
+   * @return column schema if matches the name
+   */
+  public ColumnSchema getColumnSchemaByName(String columnName) {
+    for (ColumnSchema tableColumn : listOfColumns) {
+      if (tableColumn.getColumnName().equals(columnName)) {
+        return tableColumn;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * to get the column schema present in the table by unique id
+   *
+   * @param columnUniqueId
+   * @return column schema if matches the id
+   */
+  public ColumnSchema getColumnSchemaById(String columnUniqueId) {
+    for (ColumnSchema tableColumn : listOfColumns) {
+      if (tableColumn.getColumnUniqueId().equalsIgnoreCase(columnUniqueId)) {
+        return tableColumn;
+      }
+    }
+    return null;
+  }
+
+  @Override public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((tableId == null) ? 0 : tableId.hashCode());
+    result = prime * result + ((tableName == null) ? 0 : tableName.hashCode());
+    return result;
+  }
+
+  @Override public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    TableSchema other = (TableSchema) obj;
+    if (tableId == null) {
+      if (other.tableId != null) {
+        return false;
+      }
+    } else if (!tableId.equals(other.tableId)) {
+      return false;
+    }
+    if (tableName == null) {
+      if (other.tableName != null) {
+        return false;
+      }
+    } else if (!tableName.equals(other.tableName)) {
+      return false;
+    }
+    return true;
+  }
+
+}
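
Reusing the hypothetical fact schema from the sketch above, the column lookups work
as follows; getColumnSchemaByName compares names case-sensitively, while
getColumnSchemaById ignores case on the unique id:

ColumnSchema byName = fact.getColumnSchemaByName("amount"); // exact-case name match
ColumnSchema byId   = fact.getColumnSchemaById("2");        // case-insensitive id match
// byName.isDimensionColumn() is false: "amount" is a measure in the sketch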

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/CarbonColumn.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/CarbonColumn.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/CarbonColumn.java
new file mode 100644
index 0000000..88119d0
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/CarbonColumn.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.carbon.metadata.schema.table.column;
+
+import java.io.Serializable;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.carbondata.core.carbon.ColumnIdentifier;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+
+public class CarbonColumn implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = 3648269871256322681L;
+
+  /**
+   * column schema
+   */
+  protected ColumnSchema columnSchema;
+
+  /**
+   * table ordinal
+   */
+  protected int ordinal;
+
+  /**
+   * default value for in case of restructuring will be used when older
+   * segment does not have particular column
+   */
+  protected byte[] defaultValue;
+
+  /**
+   * Column identifier
+   */
+  protected ColumnIdentifier columnIdentifier;
+
+  public CarbonColumn(ColumnSchema columnSchema, int ordinal) {
+    this.columnSchema = columnSchema;
+    this.ordinal = ordinal;
+    this.columnIdentifier =
+        new ColumnIdentifier(getColumnId(), getColumnProperties(), getDataType());
+  }
+
+  /**
+   * @return true if stored as columnar format, false if row based
+   */
+  public boolean isColumnar() {
+    return columnSchema.isColumnar();
+  }
+
+  /**
+   * @return column unique id
+   */
+  public String getColumnId() {
+    return columnSchema.getColumnUniqueId();
+  }
+
+  /**
+   * @return the dataType
+   */
+  public DataType getDataType() {
+    return columnSchema.getDataType();
+  }
+
+  /**
+   * @return the colName
+   */
+  public String getColName() {
+    return columnSchema.getColumnName();
+  }
+
+  /**
+   * @return the ordinal
+   */
+  public int getOrdinal() {
+    return ordinal;
+  }
+
+  /**
+   * @return the list of encoder used in dimension
+   */
+  public List<Encoding> getEncoder() {
+    return columnSchema.getEncodingList();
+  }
+
+  /**
+   * @return row group id if it is row based
+   */
+  public int columnGroupId() {
+    return columnSchema.getColumnGroupId();
+  }
+
+  /**
+   * @return the defaultValue
+   */
+  public byte[] getDefaultValue() {
+    return defaultValue;
+  }
+
+  /**
+   * @param defaultValue the defaultValue to set
+   */
+  public void setDefaultValue(byte[] defaultValue) {
+    this.defaultValue = defaultValue;
+  }
+
+  /**
+   * @param encoding
+   * @return true if contains the passing encoding
+   */
+  public boolean hasEncoding(Encoding encoding) {
+    return columnSchema.hasEncoding(encoding);
+  }
+
+  /**
+   * @return true if DataType is ARRAY or STRUCT, false otherwise
+   */
+  public Boolean isComplex() {
+    return columnSchema.isComplex();
+  }
+
+  /**
+   * @return true if the column is a dimension, false otherwise
+   */
+  public Boolean isDimesion() {
+    return columnSchema.isDimensionColumn();
+  }
+
+  /**
+   * @return true if the column uses an inverted index, false otherwise
+   */
+  public Boolean isUseInvertedIndnex() {
+    return columnSchema.isUseInvertedIndex();
+  }
+
+  public ColumnSchema getColumnSchema() {
+    return this.columnSchema;
+  }
+
+  /**
+   * @return the column properties
+   */
+  public Map<String, String> getColumnProperties() {
+    return this.columnSchema.getColumnProperties();
+  }
+
+  /**
+   * @return columnIdentifier
+   */
+  public ColumnIdentifier getColumnIdentifier() {
+    return this.columnIdentifier;
+  }
+}
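
CarbonColumn is a thin runtime wrapper over a ColumnSchema plus a table ordinal; a
brief sketch, reusing the hypothetical "id" schema from the CarbonTable example:

CarbonColumn col = new CarbonColumn(id, 0);
boolean dictionary = col.hasEncoding(Encoding.DICTIONARY); // true for the "id" sketch
String name = col.getColName();                            // "id"
ColumnIdentifier identifier = col.getColumnIdentifier();   // built in the constructor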

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/CarbonDimension.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/CarbonDimension.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/CarbonDimension.java
new file mode 100644
index 0000000..c81cea4
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/CarbonDimension.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.carbon.metadata.schema.table.column;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+
+public class CarbonDimension extends CarbonColumn {
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = 3648269871656322681L;
+
+  /**
+   * List of child dimension for complex type
+   */
+  private List<CarbonDimension> listOfChildDimensions;
+
+  /**
+   * in case of dictionary dimension this will store the ordinal
+   * of the dimension in mdkey
+   */
+  private int keyOrdinal;
+
+  /**
+   * column group column ordinal
+   * for example if column is second column in the group
+   * it will store 2
+   */
+  private int columnGroupOrdinal;
+
+  /**
+   * to store complex type dimension ordinal
+   */
+  private int complexTypeOrdinal;
+
+  public CarbonDimension(ColumnSchema columnSchema, int ordinal, int keyOrdinal,
+      int columnGroupOrdinal, int complexTypeOrdinal) {
+    super(columnSchema, ordinal);
+    this.keyOrdinal = keyOrdinal;
+    this.columnGroupOrdinal = columnGroupOrdinal;
+    this.complexTypeOrdinal = complexTypeOrdinal;
+  }
+
+  /**
+   * this method will initialize the list based on the number of child dimensions
+   */
+  public void initializeChildDimensionsList(int childDimension) {
+    listOfChildDimensions = new ArrayList<CarbonDimension>(childDimension);
+  }
+
+  /**
+   * @return number of children for complex type
+   */
+  public int getNumberOfChild() {
+    return columnSchema.getNumberOfChild();
+  }
+
+  /**
+   * @return list of children dims for complex type
+   */
+  public List<CarbonDimension> getListOfChildDimensions() {
+    return listOfChildDimensions;
+  }
+
+  /**
+   * @return the number of children present in case of complex type
+   */
+  public int numberOfChild() {
+    return columnSchema.getNumberOfChild();
+  }
+
+  public boolean hasEncoding(Encoding encoding) {
+    return columnSchema.getEncodingList().contains(encoding);
+  }
+
+  /**
+   * @return the keyOrdinal
+   */
+  public int getKeyOrdinal() {
+    return keyOrdinal;
+  }
+
+  /**
+   * @return the columnGroupOrdinal
+   */
+  public int getColumnGroupOrdinal() {
+    return columnGroupOrdinal;
+  }
+
+  /**
+   * @return the complexTypeOrdinal
+   */
+  public int getComplexTypeOrdinal() {
+    return complexTypeOrdinal;
+  }
+
+  public void setComplexTypeOridnal(int complexTypeOrdinal) {
+    this.complexTypeOrdinal = complexTypeOrdinal;
+  }
+
+  /**
+   * to generate the hash code for this class
+   */
+  @Override public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((columnSchema == null) ? 0 : columnSchema.hashCode());
+    return result;
+  }
+
+  /**
+   * to check whether two dimensions are equal or not
+   */
+  @Override public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (!(obj instanceof CarbonDimension)) {
+      return false;
+    }
+    CarbonDimension other = (CarbonDimension) obj;
+    if (columnSchema == null) {
+      if (other.columnSchema != null) {
+        return false;
+      }
+    } else if (!columnSchema.equals(other.columnSchema)) {
+      return false;
+    }
+    return true;
+  }
+}
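
For a complex column, CarbonTable (earlier in this commit) builds a parent
CarbonDimension and attaches its children recursively; a hand-built equivalent with
hypothetical ColumnSchema objects (structSchema, childA, childB) would look like:

// structSchema.setNumberOfChild(2) is what drives the recursion in CarbonTable,
// since getNumberOfChild() delegates to the wrapped ColumnSchema.
CarbonDimension parent = new CarbonDimension(structSchema, 0, -1, -1, 0);
parent.initializeChildDimensionsList(2);
parent.getListOfChildDimensions().add(new CarbonDimension(childA, 1, -1, -1, -1));
parent.getListOfChildDimensions().add(new CarbonDimension(childB, 2, -1, -1, -1));
// CarbonTable.assignComplexOrdinal then renumbers the children depth-first:
parent.getListOfChildDimensions().get(0).setComplexTypeOridnal(1);
parent.getListOfChildDimensions().get(1).setComplexTypeOridnal(2);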

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/CarbonMeasure.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/CarbonMeasure.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/CarbonMeasure.java
new file mode 100644
index 0000000..2c8c11e
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/CarbonMeasure.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.carbon.metadata.schema.table.column;
+
+/**
+ * class representing a column (measure) in the table
+ */
+public class CarbonMeasure extends CarbonColumn {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = 354341488059013977L;
+
+  /**
+   * aggregator chosen for measure
+   */
+  private String aggregateFunction;
+
+  /**
+   * Used when this column contains decimal data.
+   */
+  private int scale;
+
+  /**
+   * precision in decimal data
+   */
+  private int precision;
+
+  public CarbonMeasure(ColumnSchema columnSchema, int ordinal) {
+    super(columnSchema, ordinal);
+    this.scale = columnSchema.getScale();
+    this.precision = columnSchema.getPrecision();
+  }
+
+  /**
+   * @return the scale
+   */
+  public int getScale() {
+    return scale;
+  }
+
+  /**
+   * @return the precision
+   */
+  public int getPrecision() {
+    return precision;
+  }
+
+  /**
+   * @return the aggregator
+   */
+  public String getAggregateFunction() {
+    return aggregateFunction;
+  }
+
+  /**
+   * @param aggregateFunction the aggregateFunction to set
+   */
+  public void setAggregateFunction(String aggregateFunction) {
+    this.aggregateFunction = aggregateFunction;
+  }
+
+  /**
+   * to check whether two measures are equal or not
+   */
+  @Override public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (!(obj instanceof CarbonMeasure)) {
+      return false;
+    }
+    CarbonMeasure other = (CarbonMeasure) obj;
+    if (columnSchema == null) {
+      if (other.columnSchema != null) {
+        return false;
+      }
+    } else if (!columnSchema.equals(other.columnSchema)) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * hash code
+   * @return
+   */
+  @Override public int hashCode() {
+    return this.getColumnSchema().getColumnUniqueId().hashCode();
+  }
+}
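
CarbonMeasure copies scale and precision from its ColumnSchema at construction time,
while the aggregate function is set separately. An illustrative decimal measure; the
DECIMAL member of the DataType enum is an assumption:

ColumnSchema price = new ColumnSchema();
price.setColumnName("price");
price.setColumnUniqueId("3");
price.setDimensionColumn(false);
price.setDataType(DataType.DECIMAL);
price.setScale(2);
price.setPrecision(10);

CarbonMeasure measure = new CarbonMeasure(price, 0);
measure.setAggregateFunction("sum");
// measure.getScale() == 2, measure.getPrecision() == 10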

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/ColumnSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/ColumnSchema.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/ColumnSchema.java
new file mode 100644
index 0000000..270702a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/schema/table/column/ColumnSchema.java
@@ -0,0 +1,418 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.schema.table.column;
+
+import java.io.Serializable;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+
+/**
+ * Stores the information about the column metadata present in the table
+ */
+public class ColumnSchema implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = 7676766554874863763L;
+
+  /**
+   * dataType
+   */
+  private DataType dataType;
+  /**
+   * Name of the column. If it is a complex data type, we follow a naming rule:
+   * grand_parent_column.parent_column.child_column.
+   * For Array types, two columns will be stored: one for
+   * the array type and one for the primitive type, with
+   * the name parent_column.value
+   */
+  private String columnName;
+
+  /**
+   * Unique ID for a column. If this is a dimension,
+   * it is a unique ID that is used in the dictionary
+   */
+  private String columnUniqueId;
+
+  /**
+   * column reference id
+   */
+  private String columnReferenceId;
+
+  /**
+   * whether it is stored as columnar format or row format
+   */
+  private boolean isColumnar = true;
+
+  /**
+   * List of encoding that are chained to encode the data for this column
+   */
+  private List<Encoding> encodingList;
+
+  /**
+   * Whether the column is a dimension or measure
+   */
+  private boolean isDimensionColumn;
+
+  /**
+   * Whether the column should use inverted index
+   */
+  private boolean useInvertedIndex;
+
+  /**
+   * The group ID for the column, used for row format columns,
+   * wherein columns in each group are chunked together.
+   */
+  private int columnGroupId = -1;
+
+  /**
+   * Used when this column contains decimal data.
+   */
+  private int scale;
+
+  private int precision;
+
+  /**
+   * Nested fields.  Since thrift does not support nested fields,
+   * the nesting is flattened to a single list by a depth-first traversal.
+   * The children count is used to construct the nested relationship.
+   * This field is not set when the element is a primitive type
+   */
+  private int numberOfChild;
+
+  /**
+   * Used when this column is part of an aggregate function.
+   */
+  private String aggregateFunction;
+
+  /**
+   * used in case of schema restructuring
+   */
+  private byte[] defaultValue;
+
+  /**
+   * Column properties
+   */
+  private Map<String, String> columnProperties;
+
+  /**
+   * used to define the visibility of the column; default is false
+   */
+  private boolean invisible = false;
+
+  /**
+   * @return the columnName
+   */
+  public String getColumnName() {
+    return columnName;
+  }
+
+  /**
+   * @param columnName the columnName to set
+   */
+  public void setColumnName(String columnName) {
+    this.columnName = columnName;
+  }
+
+  /**
+   * @return the columnUniqueId
+   */
+  public String getColumnUniqueId() {
+    return columnUniqueId;
+  }
+
+  /**
+   * @param columnUniqueId the columnUniqueId to set
+   */
+  public void setColumnUniqueId(String columnUniqueId) {
+    this.columnUniqueId = columnUniqueId;
+  }
+
+  /**
+   * @return the isColumnar
+   */
+  public boolean isColumnar() {
+    return isColumnar;
+  }
+
+  /**
+   * @param isColumnar the isColumnar to set
+   */
+  public void setColumnar(boolean isColumnar) {
+    this.isColumnar = isColumnar;
+  }
+
+  /**
+   * @return the isDimensionColumn
+   */
+  public boolean isDimensionColumn() {
+    return isDimensionColumn;
+  }
+
+  /**
+   * @param isDimensionColumn the isDimensionColumn to set
+   */
+  public void setDimensionColumn(boolean isDimensionColumn) {
+    this.isDimensionColumn = isDimensionColumn;
+  }
+
+  /**
+   * @return the useInvertedIndex
+   */
+  public boolean isUseInvertedIndex() {
+    return useInvertedIndex;
+  }
+
+  /**
+   * @param useInvertedIndex the useInvertedIndex to set
+   */
+  public void setUseInvertedIndex(boolean useInvertedIndex) {
+    this.useInvertedIndex = useInvertedIndex;
+  }
+
+  /**
+   * @return the columnGroup
+   */
+  public int getColumnGroupId() {
+    return columnGroupId;
+  }
+
+  /**
+   * @param columnGroupId the columnGroupId to set
+   */
+  public void setColumnGroup(int columnGroupId) {
+    this.columnGroupId = columnGroupId;
+  }
+
+  /**
+   * @return the scale
+   */
+  public int getScale() {
+    return scale;
+  }
+
+  /**
+   * @param scale the scale to set
+   */
+  public void setScale(int scale) {
+    this.scale = scale;
+  }
+
+  /**
+   * @return the precision
+   */
+  public int getPrecision() {
+    return precision;
+  }
+
+  /**
+   * @param precision the precision to set
+   */
+  public void setPrecision(int precision) {
+    this.precision = precision;
+  }
+
+  /**
+   * @return the numberOfChild
+   */
+  public int getNumberOfChild() {
+    return numberOfChild;
+  }
+
+  /**
+   * @param numberOfChild the numberOfChild to set
+   */
+  public void setNumberOfChild(int numberOfChild) {
+    this.numberOfChild = numberOfChild;
+  }
+
+  /**
+   * @return the aggregator
+   */
+  public String getAggregateFunction() {
+    return aggregateFunction;
+  }
+
+  /**
+   * @param aggregateFunction the aggregator to set
+   */
+  public void setAggregateFunction(String aggregateFunction) {
+    this.aggregateFunction = aggregateFunction;
+  }
+
+  /**
+   * @return the defaultValue
+   */
+  public byte[] getDefaultValue() {
+    return defaultValue;
+  }
+
+  /**
+   * @param defaultValue the defaultValue to set
+   */
+  public void setDefaultValue(byte[] defaultValue) {
+    this.defaultValue = defaultValue;
+  }
+
+  /**
+   * hash code method; only the column name is considered
+   * when generating the hash code
+   */
+  @Override public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((columnName == null) ? 0 : columnName.hashCode());
+    return result;
+  }
+
+  /**
+   * Overridden equals method for columnSchema
+   */
+  @Override public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (!(obj instanceof ColumnSchema)) {
+      return false;
+    }
+    ColumnSchema other = (ColumnSchema) obj;
+    if (columnName == null) {
+      if (other.columnName != null) {
+        return false;
+      }
+    } else if (!columnName.equals(other.columnName)) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * @return the dataType
+   */
+  public DataType getDataType() {
+    return dataType;
+  }
+
+  /**
+   * @param dataType the dataType to set
+   */
+  public void setDataType(DataType dataType) {
+    this.dataType = dataType;
+  }
+
+  /**
+   * @return the encoderList
+   */
+  public List<Encoding> getEncodingList() {
+    return encodingList;
+  }
+
+  /**
+   * @param encoderList the encoderList to set
+   */
+  public void setEncodingList(List<Encoding> encodingList) {
+    this.encodingList = encodingList;
+  }
+
+  /**
+   * @param encoding
+   * @return true if contains the passing encoding
+   */
+  public boolean hasEncoding(Encoding encoding) {
+    if (encodingList == null || encodingList.isEmpty()) {
+      return false;
+    } else {
+      return encodingList.contains(encoding);
+    }
+  }
+
+  /**
+   * @return true if DataType is ARRAY or STRUCT, false otherwise
+   */
+  public Boolean isComplex() {
+    if (DataType.ARRAY.equals(this.getDataType()) || DataType.STRUCT.equals(this.getDataType())) {
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  /**
+   * @param columnProperties
+   */
+  public void setColumnProperties(Map<String, String> columnProperties) {
+    this.columnProperties = columnProperties;
+  }
+
+  /**
+   * @param property
+   * @return the property value, or null if the property is not present
+   */
+  public String getColumnProperty(String property) {
+    if (null != columnProperties) {
+      return columnProperties.get(property);
+    }
+    return null;
+  }
+
+  /**
+   * @return the column properties
+   */
+  public Map<String, String> getColumnProperties() {
+    return columnProperties;
+  }
+
+  /**
+   * @return the column visibility
+   */
+  public boolean isInvisible() {
+    return invisible;
+  }
+
+  /**
+   * set the visibility
+   * @param invisible
+   */
+  public void setInvisible(boolean invisible) {
+    this.invisible = invisible;
+  }
+
+  /**
+   * @return columnReferenceId
+   */
+  public String getColumnReferenceId() {
+    return columnReferenceId;
+  }
+
+  /**
+   * @param columnReferenceId
+   */
+  public void setColumnReferenceId(String columnReferenceId) {
+    this.columnReferenceId = columnReferenceId;
+  }
+
+}
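
Two small behaviors of ColumnSchema worth noting, shown with an invented instance:
hasEncoding is null-safe before any encoding list is set, and isComplex keys purely
off the data type:

ColumnSchema c = new ColumnSchema();
c.setColumnName("tags");
boolean dictionary = c.hasEncoding(Encoding.DICTIONARY); // false: encoding list is still null
c.setDataType(DataType.ARRAY);
boolean complex = c.isComplex();                         // true: ARRAY counts as complex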

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/path/CarbonSharedDictionaryPath.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/path/CarbonSharedDictionaryPath.java b/core/src/main/java/org/apache/carbondata/core/carbon/path/CarbonSharedDictionaryPath.java
new file mode 100644
index 0000000..7be92bc
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/path/CarbonSharedDictionaryPath.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.path;
+
+import java.io.File;
+
+/**
+ * Helps to get shared dictionary file paths.
+ */
+public class CarbonSharedDictionaryPath {
+
+  private static final String SHAREDDIM_DIR = "SharedDictionary";
+  private static final String DICTIONARY_EXT = ".dict";
+  private static final String DICTIONARY_META_EXT = ".dictmeta";
+  private static final String SORT_INDEX_EXT = ".sortindex";
+
+  /***
+   * @param storePath    store path
+   * @param databaseName database name
+   * @param columnId     unique column identifier
+   * @return absolute path of shared dictionary file
+   */
+  public static String getDictionaryFilePath(String storePath, String databaseName,
+      String columnId) {
+    return getSharedDictionaryDir(storePath, databaseName) + File.separator + columnId
+        + DICTIONARY_EXT;
+  }
+
+  /***
+   * @param storePath    store path
+   * @param databaseName database name
+   * @param columnId     unique column identifier
+   * @return absolute path of shared dictionary meta file
+   */
+  public static String getDictionaryMetaFilePath(String storePath, String databaseName,
+      String columnId) {
+    return getSharedDictionaryDir(storePath, databaseName) + File.separator + columnId
+        + DICTIONARY_META_EXT;
+  }
+
+  /***
+   * @param storePath    store path
+   * @param databaseName database name
+   * @param columnId     unique column identifier
+   * @return absolute path of shared dictionary sort index file
+   */
+  public static String getSortIndexFilePath(String storePath, String databaseName,
+      String columnId) {
+    return getSharedDictionaryDir(storePath, databaseName) + File.separator + columnId
+        + SORT_INDEX_EXT;
+  }
+
+  private static String getSharedDictionaryDir(String storePath, String databaseName) {
+    return storePath + File.separator + databaseName + File.separator + SHAREDDIM_DIR;
+  }
+
+}
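
The three helpers above differ only in the file extension; with invented inputs the
resulting layout is:

String dict = CarbonSharedDictionaryPath.getDictionaryFilePath("/store", "default", "col1");
// -> /store/default/SharedDictionary/col1.dict (with '/' as File.separator)
String meta = CarbonSharedDictionaryPath.getDictionaryMetaFilePath("/store", "default", "col1");
// -> /store/default/SharedDictionary/col1.dictmeta
String sort = CarbonSharedDictionaryPath.getSortIndexFilePath("/store", "default", "col1");
// -> /store/default/SharedDictionary/col1.sortindex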

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/path/CarbonStorePath.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/path/CarbonStorePath.java b/core/src/main/java/org/apache/carbondata/core/carbon/path/CarbonStorePath.java
new file mode 100644
index 0000000..567602b
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/path/CarbonStorePath.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.path;
+
+import java.io.File;
+
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Helps to get Store content paths.
+ */
+public class CarbonStorePath extends Path {
+
+  private String storePath;
+
+  public CarbonStorePath(String storePathString) {
+    super(storePathString);
+    this.storePath = storePathString;
+  }
+
+  /**
+   * gets CarbonTablePath object to manage table paths
+   */
+  public static CarbonTablePath getCarbonTablePath(String storePath,
+      CarbonTableIdentifier tableIdentifier) {
+    CarbonTablePath carbonTablePath = new CarbonTablePath(tableIdentifier,
+        storePath + File.separator + tableIdentifier.getDatabaseName() + File.separator
+            + tableIdentifier.getTableName());
+
+    return carbonTablePath;
+  }
+
+  /**
+   * gets CarbonTablePath object to manage table paths
+   */
+  public CarbonTablePath getCarbonTablePath(CarbonTableIdentifier tableIdentifier) {
+    return CarbonStorePath.getCarbonTablePath(storePath, tableIdentifier);
+  }
+
+  @Override public boolean equals(Object o) {
+    if (!(o instanceof CarbonStorePath)) {
+      return false;
+    }
+    CarbonStorePath path = (CarbonStorePath)o;
+    return storePath.equals(path.storePath) && super.equals(o);
+  }
+
+  @Override public int hashCode() {
+    return super.hashCode() + storePath.hashCode();
+  }
+}
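
CarbonStorePath derives a table path as store/database/table; a quick illustrative
use, with the CarbonTableIdentifier constructed from database name, table name, and
table id as seen earlier in this commit:

CarbonTableIdentifier tableId = new CarbonTableIdentifier("default", "sales", "t1");
CarbonTablePath tablePath = CarbonStorePath.getCarbonTablePath("/store", tableId);
// tablePath wraps /store/default/sales (with '/' as File.separator)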



[13/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/metadata/BlockletInfoColumnar.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/metadata/BlockletInfoColumnar.java b/core/src/main/java/org/carbondata/core/metadata/BlockletInfoColumnar.java
deleted file mode 100644
index ddee42c..0000000
--- a/core/src/main/java/org/carbondata/core/metadata/BlockletInfoColumnar.java
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.metadata;
-
-import java.util.BitSet;
-
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.keygenerator.mdkey.NumberCompressor;
-
-public class BlockletInfoColumnar {
-  /**
-   * fileName.
-   */
-  private String fileName;
-
-  /**
-   * measureOffset.
-   */
-  private long[] measureOffset;
-
-  /**
-   * measureLength.
-   */
-  private int[] measureLength;
-
-  /**
-   * numberOfKeys.
-   */
-  private int numberOfKeys;
-
-  /**
-   * startKey.
-   */
-  private byte[] startKey;
-
-  /**
-   * endKey.
-   */
-  private byte[] endKey;
-
-  /**
-   * keyOffSets
-   */
-  private long[] keyOffSets;
-
-  /**
-   * keyLengths
-   */
-  private int[] keyLengths;
-
-  /**
-   * isSortedKeyColumn
-   */
-  private boolean[] isSortedKeyColumn;
-
-  /**
-   * keyBlockIndexOffSets
-   */
-  private long[] keyBlockIndexOffSets;
-
-  /**
-   * keyBlockIndexLength
-   */
-  private int[] keyBlockIndexLength;
-
-  /**
-   * dataIndexMapLength
-   */
-  private int[] dataIndexMapLength;
-
-  /**
-   * dataIndexMapOffsets
-   */
-  private long[] dataIndexMapOffsets;
-
-  private boolean[] aggKeyBlock;
-  /**
-   * blockletMetaSize
-   */
-  private int blockletMetaSize;
-
-  private NumberCompressor[] keyBlockUnCompressor;
-
-  private ValueCompressionModel compressionModel;
-
-  /**
-   * column max array
-   */
-  private byte[][] columnMaxData;
-
-  /**
-   * column min array
-   */
-  private byte[][] columnMinData;
-
-  /**
-   * true if the given index is a colgroup block
-   */
-  private boolean[] colGrpBlock;
-
-  /**
-   * bit set which holds the measure
-   * indexes which are null
-   */
-  private BitSet[] measureNullValueIndex;
-
-  /**
-   * getFileName().
-   *
-   * @return String.
-   */
-  public String getFileName() {
-    return fileName;
-  }
-
-  /**
-   * setFileName.
-   */
-  public void setFileName(String fileName) {
-    this.fileName = fileName;
-  }
-
-  /**
-   * getMeasureLength
-   *
-   * @return int[].
-   */
-  public int[] getMeasureLength() {
-    return measureLength;
-  }
-
-  /**
-   * setMeasureLength.
-   *
-   * @param measureLength
-   */
-  public void setMeasureLength(int[] measureLength) {
-    this.measureLength = measureLength;
-  }
-
-  /**
-   * getMeasureOffset.
-   *
-   * @return long[].
-   */
-  public long[] getMeasureOffset() {
-    return measureOffset;
-  }
-
-  /**
-   * setMeasureOffset.
-   *
-   * @param measureOffset
-   */
-  public void setMeasureOffset(long[] measureOffset) {
-    this.measureOffset = measureOffset;
-  }
-
-  /**
-   * getStartKey().
-   *
-   * @return byte[].
-   */
-  public byte[] getStartKey() {
-    return startKey;
-  }
-
-  /**
-   * setStartKey.
-   *
-   * @param startKey
-   */
-  public void setStartKey(byte[] startKey) {
-    this.startKey = startKey;
-  }
-
-  /**
-   * getEndKey().
-   *
-   * @return byte[].
-   */
-  public byte[] getEndKey() {
-    return endKey;
-  }
-
-  /**
-   * setEndKey.
-   *
-   * @param endKey
-   */
-  public void setEndKey(byte[] endKey) {
-    this.endKey = endKey;
-  }
-
-  /**
-   * @return the keyOffSets
-   */
-  public long[] getKeyOffSets() {
-    return keyOffSets;
-  }
-
-  /**
-   * @param keyOffSets the keyOffSets to set
-   */
-  public void setKeyOffSets(long[] keyOffSets) {
-    this.keyOffSets = keyOffSets;
-  }
-
-  /**
-   * @return the keyLengths
-   */
-  public int[] getKeyLengths() {
-    return keyLengths;
-  }
-
-  //TODO SIMIAN
-
-  /**
-   * @param keyLengths the keyLengths to set
-   */
-  public void setKeyLengths(int[] keyLengths) {
-    this.keyLengths = keyLengths;
-  }
-
-  /**
-   * getNumberOfKeys()
-   *
-   * @return int.
-   */
-  public int getNumberOfKeys() {
-    return numberOfKeys;
-  }
-
-  /**
-   * setNumberOfKeys.
-   *
-   * @param numberOfKeys
-   */
-  public void setNumberOfKeys(int numberOfKeys) {
-    this.numberOfKeys = numberOfKeys;
-  }
-
-  /**
-   * @return the isSortedKeyColumn
-   */
-  public boolean[] getIsSortedKeyColumn() {
-    return isSortedKeyColumn;
-  }
-
-  /**
-   * @param isSortedKeyColumn the isSortedKeyColumn to set
-   */
-  public void setIsSortedKeyColumn(boolean[] isSortedKeyColumn) {
-    this.isSortedKeyColumn = isSortedKeyColumn;
-  }
-
-  /**
-   * @return the keyBlockIndexOffSets
-   */
-  public long[] getKeyBlockIndexOffSets() {
-    return keyBlockIndexOffSets;
-  }
-
-  /**
-   * @param keyBlockIndexOffSets the keyBlockIndexOffSets to set
-   */
-  public void setKeyBlockIndexOffSets(long[] keyBlockIndexOffSets) {
-    this.keyBlockIndexOffSets = keyBlockIndexOffSets;
-  }
-
-  /**
-   * @return the keyBlockIndexLength
-   */
-  public int[] getKeyBlockIndexLength() {
-    return keyBlockIndexLength;
-  }
-
-  /**
-   * @param keyBlockIndexLength the keyBlockIndexLength to set
-   */
-  public void setKeyBlockIndexLength(int[] keyBlockIndexLength) {
-    this.keyBlockIndexLength = keyBlockIndexLength;
-  }
-
-  /**
-   * @return the blockletMetaSize
-   */
-  public int getBlockletMetaSize() {
-    return blockletMetaSize;
-  }
-
-  /**
-   * @param blockletMetaSize the blockletMetaSize to set
-   */
-  public void setBlockletMetaSize(int blockletMetaSize) {
-    this.blockletMetaSize = blockletMetaSize;
-  }
-
-  /**
-   * @return the dataIndexMapLength
-   */
-  public int[] getDataIndexMapLength() {
-    return dataIndexMapLength;
-  }
-
-  public void setDataIndexMapLength(int[] dataIndexMapLength) {
-    this.dataIndexMapLength = dataIndexMapLength;
-  }
-
-  /**
-   * @return the dataIndexMapOffsets
-   */
-  public long[] getDataIndexMapOffsets() {
-    return dataIndexMapOffsets;
-  }
-
-  public void setDataIndexMapOffsets(long[] dataIndexMapOffsets) {
-    this.dataIndexMapOffsets = dataIndexMapOffsets;
-  }
-
-  public boolean[] getAggKeyBlock() {
-    return aggKeyBlock;
-  }
-
-  public void setAggKeyBlock(boolean[] aggKeyBlock) {
-    this.aggKeyBlock = aggKeyBlock;
-  }
-
-  public NumberCompressor[] getKeyBlockUnCompressor() {
-    return keyBlockUnCompressor;
-  }
-
-  public void setKeyBlockUnCompressor(NumberCompressor[] keyBlockUnCompressor) {
-    this.keyBlockUnCompressor = keyBlockUnCompressor;
-  }
-
-  public byte[][] getColumnMaxData() {
-    return this.columnMaxData;
-  }
-
-  public void setColumnMaxData(byte[][] columnMaxData) {
-    this.columnMaxData = columnMaxData;
-  }
-
-  public byte[][] getColumnMinData() {
-    return this.columnMinData;
-  }
-
-  public void setColumnMinData(byte[][] columnMinData) {
-    this.columnMinData = columnMinData;
-  }
-
-  public ValueCompressionModel getCompressionModel() {
-    return compressionModel;
-  }
-
-  public void setCompressionModel(ValueCompressionModel compressionModel) {
-    this.compressionModel = compressionModel;
-  }
-
-  /**
-   * @return
-   */
-  public boolean[] getColGrpBlocks() {
-    return this.colGrpBlock;
-  }
-
-  /**
-   * @param colGrpBlock
-   */
-  public void setColGrpBlocks(boolean[] colGrpBlock) {
-    this.colGrpBlock = colGrpBlock;
-  }
-
-  /**
-   * @return the measureNullValueIndex
-   */
-  public BitSet[] getMeasureNullValueIndex() {
-    return measureNullValueIndex;
-  }
-
-  /**
-   * @param measureNullValueIndex the measureNullValueIndex to set
-   */
-  public void setMeasureNullValueIndex(BitSet[] measureNullValueIndex) {
-    this.measureNullValueIndex = measureNullValueIndex;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/metadata/ValueEncoderMeta.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/metadata/ValueEncoderMeta.java b/core/src/main/java/org/carbondata/core/metadata/ValueEncoderMeta.java
deleted file mode 100644
index f7801b3..0000000
--- a/core/src/main/java/org/carbondata/core/metadata/ValueEncoderMeta.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.metadata;
-
-import java.io.Serializable;
-
-/**
- * It holds value compression metadata for one data column
- */
-public class ValueEncoderMeta implements Serializable {
-
-  /**
-   * maxValue
-   */
-  private Object maxValue;
-  /**
-   * minValue.
-   */
-  private Object minValue;
-
-  /**
-   * uniqueValue
-   */
-  private Object uniqueValue;
-  /**
-   * decimal.
-   */
-  private int decimal;
-
-  /**
-   * aggType
-   */
-  private char type;
-
-  /**
-   * dataTypeSelected
-   */
-  private byte dataTypeSelected;
-
-  public Object getMaxValue() {
-    return maxValue;
-  }
-
-  public void setMaxValue(Object maxValue) {
-    this.maxValue = maxValue;
-  }
-
-  public Object getMinValue() {
-    return minValue;
-  }
-
-  public void setMinValue(Object minValue) {
-    this.minValue = minValue;
-  }
-
-  public Object getUniqueValue() {
-    return uniqueValue;
-  }
-
-  public void setUniqueValue(Object uniqueValue) {
-    this.uniqueValue = uniqueValue;
-  }
-
-  public int getDecimal() {
-    return decimal;
-  }
-
-  public void setDecimal(int decimal) {
-    this.decimal = decimal;
-  }
-
-  public char getType() {
-    return type;
-  }
-
-  public void setType(char type) {
-    this.type = type;
-  }
-
-  public byte getDataTypeSelected() {
-    return dataTypeSelected;
-  }
-
-  public void setDataTypeSelected(byte dataTypeSelected) {
-    this.dataTypeSelected = dataTypeSelected;
-  }
-}
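
As an illustrative sketch, filling the meta for one measure column; the statistics and the type code 'n' are hypothetical placeholders, not values confirmed by this file:

  ValueEncoderMeta meta = new ValueEncoderMeta();
  meta.setMaxValue(1234.5);           // hypothetical column statistics
  meta.setMinValue(0.0);
  meta.setUniqueValue(-1.0);
  meta.setDecimal(2);                 // decimal places observed in the column
  meta.setType('n');                  // assumed placeholder type code
  meta.setDataTypeSelected((byte) 1);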

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryColumnMetaChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryColumnMetaChunk.java b/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryColumnMetaChunk.java
deleted file mode 100644
index 35bc9c1..0000000
--- a/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryColumnMetaChunk.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.reader;
-
-/**
- * A wrapper class for the thrift class ColumnDictionaryChunkMeta which
- * contains data like min and max surrogate key, start and end offset, and chunk count
- */
-public class CarbonDictionaryColumnMetaChunk {
-
-  /**
-   * Minimum value surrogate key for a segment
-   */
-  private int min_surrogate_key;
-
-  /**
-   * Max value of surrogate key for a segment
-   */
-  private int max_surrogate_key;
-
-  /**
-   * start offset of dictionary chunk in dictionary file for a segment
-   */
-  private long start_offset;
-
-  /**
-   * end offset of dictionary chunk in dictionary file for a segment
-   */
-  private long end_offset;
-
-  /**
-   * count of dictionary chunks for a segment
-   */
-  private int chunk_count;
-
-  /**
-   * constructor
-   *
-   * @param min_surrogate_key Minimum value surrogate key for a segment
-   * @param max_surrogate_key Maximum value surrogate key for a segment
-   * @param start_offset      start offset of dictionary chunk in dictionary file for a segment
-   * @param end_offset        end offset of dictionary chunk in dictionary file for a segment
-   * @param chunk_count       count of dictionary chunks for a segment
-   */
-  public CarbonDictionaryColumnMetaChunk(int min_surrogate_key, int max_surrogate_key,
-      long start_offset, long end_offset, int chunk_count) {
-    this.min_surrogate_key = min_surrogate_key;
-    this.max_surrogate_key = max_surrogate_key;
-    this.start_offset = start_offset;
-    this.end_offset = end_offset;
-    this.chunk_count = chunk_count;
-  }
-
-  /**
-   * @return min surrogate key
-   */
-  public int getMin_surrogate_key() {
-    return min_surrogate_key;
-  }
-
-  /**
-   * @return max surrogate key
-   */
-  public int getMax_surrogate_key() {
-    return max_surrogate_key;
-  }
-
-  /**
-   * @return start offset
-   */
-  public long getStart_offset() {
-    return start_offset;
-  }
-
-  /**
-   * @return end offset
-   */
-  public long getEnd_offset() {
-    return end_offset;
-  }
-
-  /**
-   * @return chunk count
-   */
-  public int getChunk_count() {
-    return chunk_count;
-  }
-}
-
-

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryMetadataReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryMetadataReader.java b/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryMetadataReader.java
deleted file mode 100644
index df437a8..0000000
--- a/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryMetadataReader.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.reader;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * dictionary metadata reader interface which declares methods to read dictionary metadata
- */
-public interface CarbonDictionaryMetadataReader extends Closeable {
-
-  /**
-   * This method will be used to read the complete metadata file.
-   * Applicable scenarios:
-   * 1. Query execution. Whenever a query is executed, the dictionary metadata has to be
-   * read first in order to read the dictionary file and define the query scope.
-   * 2. If the dictionary file is read using start and end offsets, this meta list can be
-   * used to count the total number of dictionary chunks present between the two offsets.
-   *
-   * @return list of all dictionary meta chunks which contains information for each segment
-   * @throws IOException if an I/O error occurs
-   */
-  List<CarbonDictionaryColumnMetaChunk> read() throws IOException;
-
-  /**
-   * This method will be used to read only the last entry of dictionary meta chunk.
-   * Applicable scenarios :
-   * 1. Global dictionary generation for incremental load. In this case only the
-   * last dictionary chunk meta entry has to be read to calculate the min and max surrogate
-   * keys and the start and end offsets for the new dictionary chunk.
-   * 2. Truncate operation. While writing the dictionary file during an incremental load,
-   * the file needs to be validated for any inconsistency. Here the end offset of the last
-   * dictionary chunk meta is validated against the file size.
-   *
-   * @return last segment entry for dictionary chunk
-   * @throws IOException if an I/O error occurs
-   */
-  CarbonDictionaryColumnMetaChunk readLastEntryOfDictionaryMetaChunk() throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryMetadataReaderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryMetadataReaderImpl.java b/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryMetadataReaderImpl.java
deleted file mode 100644
index 227dc8d..0000000
--- a/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryMetadataReaderImpl.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.reader;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.common.factory.CarbonCommonFactory;
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.carbon.path.CarbonTablePath;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.service.PathService;
-import org.carbondata.format.ColumnDictionaryChunkMeta;
-
-import org.apache.thrift.TBase;
-
-/**
- * This class performs the functionality of reading the dictionary metadata file
- */
-public class CarbonDictionaryMetadataReaderImpl implements CarbonDictionaryMetadataReader {
-
-  /**
-   * carbon table identifier
-   */
-  protected CarbonTableIdentifier carbonTableIdentifier;
-
-  /**
-   * HDFS store path
-   */
-  protected String hdfsStorePath;
-
-  /**
-   * column identifier
-   */
-  protected ColumnIdentifier columnIdentifier;
-
-  /**
-   * dictionary metadata file path
-   */
-  protected String columnDictionaryMetadataFilePath;
-
-  /**
-   * dictionary metadata thrift file reader
-   */
-  private ThriftReader dictionaryMetadataFileReader;
-
-  /**
-   * Constructor
-   *
-   * @param hdfsStorePath         HDFS store path
-   * @param carbonTableIdentifier table identifier which will give table name and database name
-   * @param columnIdentifier      column unique identifier
-   */
-  public CarbonDictionaryMetadataReaderImpl(String hdfsStorePath,
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier) {
-    this.hdfsStorePath = hdfsStorePath;
-    this.carbonTableIdentifier = carbonTableIdentifier;
-    this.columnIdentifier = columnIdentifier;
-    initFileLocation();
-  }
-
-  /**
-   * This method will be used to read the complete metadata file.
-   * Applicable scenarios:
-   * 1. Query execution. Whenever a query is executed, the dictionary metadata has to be
-   * read first in order to read the dictionary file and define the query scope.
-   * 2. If the dictionary file is read using start and end offsets, this meta list can be
-   * used to count the total number of dictionary chunks present between the two offsets.
-   *
-   * @return list of all dictionary meta chunks which contains information for each segment
-   * @throws IOException if an I/O error occurs
-   */
-  @Override public List<CarbonDictionaryColumnMetaChunk> read() throws IOException {
-    List<CarbonDictionaryColumnMetaChunk> dictionaryMetaChunks =
-        new ArrayList<CarbonDictionaryColumnMetaChunk>(
-            CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    CarbonDictionaryColumnMetaChunk columnMetaChunk = null;
-    ColumnDictionaryChunkMeta dictionaryChunkMeta = null;
-    // open dictionary meta thrift reader
-    openThriftReader();
-    // read till dictionary chunk count
-    while (dictionaryMetadataFileReader.hasNext()) {
-      // get the thrift object for dictionary chunk
-      dictionaryChunkMeta = (ColumnDictionaryChunkMeta) dictionaryMetadataFileReader.read();
-      // create a new instance of chunk meta wrapper using thrift object
-      columnMetaChunk = getNewInstanceOfCarbonDictionaryColumnMetaChunk(dictionaryChunkMeta);
-      dictionaryMetaChunks.add(columnMetaChunk);
-    }
-    return dictionaryMetaChunks;
-  }
-
-  /**
-   * This method will be used to read only the last entry of dictionary meta chunk.
-   * Applicable scenarios :
-   * 1. Global dictionary generation for incremental load. In this case only the
-   * last dictionary chunk meta entry has to be read to calculate the min and max surrogate
-   * keys and the start and end offsets for the new dictionary chunk.
-   * 2. Truncate operation. While writing the dictionary file during an incremental load,
-   * the file needs to be validated for any inconsistency. Here the end offset of the last
-   * dictionary chunk meta is validated against the file size.
-   *
-   * @return last segment entry for dictionary chunk
-   * @throws IOException if an I/O error occurs
-   */
-  @Override public CarbonDictionaryColumnMetaChunk readLastEntryOfDictionaryMetaChunk()
-      throws IOException {
-    ColumnDictionaryChunkMeta dictionaryChunkMeta = null;
-    // open dictionary meta thrift reader
-    openThriftReader();
-    // at the completion of while loop we will get the last dictionary chunk entry
-    while (dictionaryMetadataFileReader.hasNext()) {
-      // get the thrift object for dictionary chunk
-      dictionaryChunkMeta = (ColumnDictionaryChunkMeta) dictionaryMetadataFileReader.read();
-    }
-    // create a new instance of chunk meta wrapper using thrift object
-    CarbonDictionaryColumnMetaChunk columnMetaChunkForLastSegment =
-        getNewInstanceOfCarbonDictionaryColumnMetaChunk(dictionaryChunkMeta);
-    return columnMetaChunkForLastSegment;
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated
-   * with it. If the stream is already closed then invoking this
-   * method has no effect.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override public void close() throws IOException {
-    if (null != dictionaryMetadataFileReader) {
-      dictionaryMetadataFileReader.close();
-      dictionaryMetadataFileReader = null;
-    }
-  }
-
-  /**
-   * This method will form the path for dictionary metadata file for a given column
-   */
-  protected void initFileLocation() {
-    PathService pathService = CarbonCommonFactory.getPathService();
-    CarbonTablePath carbonTablePath = pathService.getCarbonTablePath(columnIdentifier,
-                this.hdfsStorePath, carbonTableIdentifier);
-    this.columnDictionaryMetadataFilePath =
-        carbonTablePath.getDictionaryMetaFilePath(columnIdentifier.getColumnId());
-  }
-
-  /**
-   * This method will open the dictionary file stream for reading
-   *
-   * @throws IOException thrift reader open method throws IOException
-   */
-  private void openThriftReader() throws IOException {
-    // initialise dictionary file reader which will return dictionary thrift object
-    // dictionary thrift object contains a list of byte buffer
-    if (null == dictionaryMetadataFileReader) {
-      dictionaryMetadataFileReader =
-          new ThriftReader(this.columnDictionaryMetadataFilePath, new ThriftReader.TBaseCreator() {
-            @Override public TBase create() {
-              return new ColumnDictionaryChunkMeta();
-            }
-          });
-      // Open it
-      dictionaryMetadataFileReader.open();
-    }
-
-  }
-
-  /**
-   * Given a thrift object, this method will create a new wrapper class object
-   * for dictionary chunk
-   *
-   * @param dictionaryChunkMeta reference for chunk meta thrift object
-   * @return wrapper object of dictionary chunk meta
-   */
-  private CarbonDictionaryColumnMetaChunk getNewInstanceOfCarbonDictionaryColumnMetaChunk(
-      ColumnDictionaryChunkMeta dictionaryChunkMeta) {
-    CarbonDictionaryColumnMetaChunk columnMetaChunk =
-        new CarbonDictionaryColumnMetaChunk(dictionaryChunkMeta.getMin_surrogate_key(),
-            dictionaryChunkMeta.getMax_surrogate_key(), dictionaryChunkMeta.getStart_offset(),
-            dictionaryChunkMeta.getEnd_offset(), dictionaryChunkMeta.getChunk_count());
-    return columnMetaChunk;
-  }
-}
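
A minimal usage sketch of the incremental-load scenario described in the Javadoc above; storePath, tableIdentifier and columnIdentifier are hypothetical placeholders:

  CarbonDictionaryMetadataReader metaReader =
      new CarbonDictionaryMetadataReaderImpl(storePath, tableIdentifier, columnIdentifier);
  try {
    CarbonDictionaryColumnMetaChunk lastChunk =
        metaReader.readLastEntryOfDictionaryMetaChunk();
    // the next dictionary chunk starts where the last one ended, and new
    // surrogate keys continue after the current maximum
    long nextStartOffset = lastChunk.getEnd_offset();
    int nextSurrogateKey = lastChunk.getMax_surrogate_key() + 1;
  } finally {
    metaReader.close();
  }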

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryReader.java b/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryReader.java
deleted file mode 100644
index 0470f40..0000000
--- a/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryReader.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.reader;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * dictionary reader interface which declares methods for
- * reading carbon dictionary files
- */
-public interface CarbonDictionaryReader extends Closeable {
-  /**
-   * This method should be used when complete dictionary data needs to be read.
-   * Applicable scenarios :
-   * 1. Global dictionary generation in case of incremental load
-   * 2. Reading dictionary file on first time query
-   * 3. Loading a dictionary column in memory based on query requirement.
-   * This is a case where carbon column cache feature is enabled in which a
-   * column dictionary is read if it is present in the query.
-   *
-   * @return list of byte array. Each byte array is unique dictionary value
-   * @throws IOException if an I/O error occurs
-   */
-  List<byte[]> read() throws IOException;
-
-  /**
-   * This method should be used when data has to be read from a given offset.
-   * Applicable scenarios :
-   * 1. Incremental data load. If the column dictionary is already loaded in memory
-   * and an incremental load is done, then for the new query only the new dictionary data
-   * has to be read into memory.
-   *
-   * @param startOffset start offset of dictionary file
-   * @return list of byte array. Each byte array is unique dictionary value
-   * @throws IOException if an I/O error occurs
-   */
-  List<byte[]> read(long startOffset) throws IOException;
-
-  /**
-   * This method will be used to read data between given start and end offset.
-   * Applicable scenarios:
-   * 1. Truncate operation. If there is any inconsistency while writing the dictionary file
-   * then we can give the start and end offset till where the data has to be retained.
-   *
-   * @param startOffset start offset of dictionary file
-   * @param endOffset   end offset of dictionary file
-   * @return list of byte array. Each byte array is unique dictionary value
-   * @throws IOException if an I/O error occurs
-   */
-  List<byte[]> read(long startOffset, long endOffset) throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryReaderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryReaderImpl.java b/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryReaderImpl.java
deleted file mode 100644
index a93d47a..0000000
--- a/core/src/main/java/org/carbondata/core/reader/CarbonDictionaryReaderImpl.java
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.reader;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.common.factory.CarbonCommonFactory;
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.carbon.path.CarbonTablePath;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.service.PathService;
-import org.carbondata.format.ColumnDictionaryChunk;
-
-import org.apache.thrift.TBase;
-
-/**
- * This class performs the functionality of reading a carbon dictionary file.
- * It implements various overloaded method for read functionality.
- */
-public class CarbonDictionaryReaderImpl implements CarbonDictionaryReader {
-
-  /**
-   * carbon table identifier
-   */
-  protected CarbonTableIdentifier carbonTableIdentifier;
-
-  /**
-   * HDFS store path
-   */
-  protected String hdfsStorePath;
-
-  /**
-   * column name
-   */
-  protected ColumnIdentifier columnIdentifier;
-
-  /**
-   * dictionary file path
-   */
-  protected String columnDictionaryFilePath;
-
-  /**
-   * dictionary thrift file reader
-   */
-  private ThriftReader dictionaryFileReader;
-
-  /**
-   * Constructor
-   *
-   * @param hdfsStorePath         HDFS store path
-   * @param carbonTableIdentifier table identifier which will give table name and database name
-   * @param columnIdentifier      column unique identifier
-   */
-  public CarbonDictionaryReaderImpl(String hdfsStorePath,
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier) {
-    this.hdfsStorePath = hdfsStorePath;
-    this.carbonTableIdentifier = carbonTableIdentifier;
-    this.columnIdentifier = columnIdentifier;
-    initFileLocation();
-  }
-
-  /**
-   * This method should be used when complete dictionary data needs to be read.
-   * Applicable scenarios :
-   * 1. Global dictionary generation in case of incremental load
-   * 2. Reading dictionary file on first time query
-   * 3. Loading a dictionary column in memory based on query requirement.
-   * This is a case where carbon column cache feature is enabled in which a
-   * column dictionary is read if it is present in the query.
-   *
-   * @return list of byte array. Each byte array is unique dictionary value
-   * @throws IOException if an I/O error occurs
-   */
-  @Override public List<byte[]> read() throws IOException {
-    return read(0L);
-  }
-
-  /**
-   * This method should be used when data has to be read from a given offset.
-   * Applicable scenarios :
-   * 1. Incremental data load. If the column dictionary is already loaded in memory
-   * and an incremental load is done, then for the new query only the new dictionary data
-   * has to be read into memory.
-   *
-   * @param startOffset start offset of dictionary file
-   * @return list of byte array. Each byte array is unique dictionary value
-   * @throws IOException if an I/O error occurs
-   */
-  @Override public List<byte[]> read(long startOffset) throws IOException {
-    List<CarbonDictionaryColumnMetaChunk> carbonDictionaryColumnMetaChunks =
-        readDictionaryMetadataFile();
-    // get the last entry for carbon dictionary meta chunk
-    CarbonDictionaryColumnMetaChunk carbonDictionaryColumnMetaChunk =
-        carbonDictionaryColumnMetaChunks.get(carbonDictionaryColumnMetaChunks.size() - 1);
-    // end offset till where the dictionary file has to be read
-    long endOffset = carbonDictionaryColumnMetaChunk.getEnd_offset();
-    return read(carbonDictionaryColumnMetaChunks, startOffset, endOffset);
-  }
-
-  /**
-   * This method will be used to read data between given start and end offset.
-   * Applicable scenarios:
-   * 1. Truncate operation. If there is any inconsistency while writing the dictionary file
-   * then we can give the start and end offset till where the data has to be retained.
-   *
-   * @param startOffset start offset of dictionary file
-   * @param endOffset   end offset of dictionary file
-   * @return list of byte array. Each byte array is unique dictionary value
-   * @throws IOException if an I/O error occurs
-   */
-  @Override public List<byte[]> read(long startOffset, long endOffset) throws IOException {
-    List<CarbonDictionaryColumnMetaChunk> carbonDictionaryColumnMetaChunks =
-        readDictionaryMetadataFile();
-    return read(carbonDictionaryColumnMetaChunks, startOffset, endOffset);
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated
-   * with it. If the stream is already closed then invoking this
-   * method has no effect.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override public void close() throws IOException {
-    if (null != dictionaryFileReader) {
-      dictionaryFileReader.close();
-      dictionaryFileReader = null;
-    }
-  }
-
-  /**
-   * @param carbonDictionaryColumnMetaChunks dictionary meta chunk list
-   * @param startOffset                      start offset for dictionary data file
-   * @param endOffset                        end offset till where data has
-   *                                         to be read from dictionary data file
-   * @return list of byte array dictionary values
-   * @throws IOException readDictionary file method throws IO exception
-   */
-  private List<byte[]> read(List<CarbonDictionaryColumnMetaChunk> carbonDictionaryColumnMetaChunks,
-      long startOffset, long endOffset) throws IOException {
-    // calculate the number of chunks to be read from the dictionary file, starting at the offset
-    int dictionaryChunkCountsToBeRead =
-        calculateTotalDictionaryChunkCountsToBeRead(carbonDictionaryColumnMetaChunks, startOffset,
-            endOffset);
-    // open dictionary file thrift reader
-    openThriftReader();
-    // read the required number of chunks from dictionary file
-    List<ColumnDictionaryChunk> columnDictionaryChunks =
-        readDictionaryFile(startOffset, dictionaryChunkCountsToBeRead);
-    // convert byte buffer list to byte array list of dictionary values
-    List<byte[]> dictionaryValues =
-        new ArrayList<byte[]>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    for (ColumnDictionaryChunk dictionaryChunk : columnDictionaryChunks) {
-      convertAndFillByteBufferListToByteArrayList(dictionaryValues, dictionaryChunk.getValues());
-    }
-    return dictionaryValues;
-  }
-
-  /**
-   * This method will convert and fill list of byte buffer to list of byte array
-   *
-   * @param dictionaryValues          list of byte array. Each byte array is
-   *                                  unique dictionary value
-   * @param dictionaryValueBufferList dictionary thrift object which is a list of byte buffer.
-   *                                  Each dictionary value is a wrapped in byte buffer before
-   *                                  writing to file
-   */
-  private void convertAndFillByteBufferListToByteArrayList(List<byte[]> dictionaryValues,
-      List<ByteBuffer> dictionaryValueBufferList) {
-    for (ByteBuffer buffer : dictionaryValueBufferList) {
-      int length = buffer.limit();
-      byte[] value = new byte[length];
-      buffer.get(value, 0, value.length);
-      dictionaryValues.add(value);
-    }
-  }
-
-  /**
-   * This method will form the path for dictionary file for a given column
-   */
-  protected void initFileLocation() {
-    PathService pathService = CarbonCommonFactory.getPathService();
-    CarbonTablePath carbonTablePath = pathService.getCarbonTablePath(columnIdentifier,
-                this.hdfsStorePath, carbonTableIdentifier);
-    this.columnDictionaryFilePath = carbonTablePath
-        .getDictionaryFilePath(columnIdentifier.getColumnId());
-  }
-
-  /**
-   * This method will read the dictionary file and return the list of dictionary thrift object
-   *
-   * @param dictionaryStartOffset        start offset for dictionary file
-   * @param dictionaryChunkCountToBeRead number of dictionary chunks to be read
-   * @return list of dictionary chunks
-   * @throws IOException setReadOffset method throws I/O exception
-   */
-  private List<ColumnDictionaryChunk> readDictionaryFile(long dictionaryStartOffset,
-      int dictionaryChunkCountToBeRead) throws IOException {
-    List<ColumnDictionaryChunk> dictionaryChunks =
-        new ArrayList<ColumnDictionaryChunk>(dictionaryChunkCountToBeRead);
-    // skip the number of bytes if a start offset is given
-    dictionaryFileReader.setReadOffset(dictionaryStartOffset);
-    // read till dictionary chunk count
-    while (dictionaryFileReader.hasNext()
-        && dictionaryChunks.size() != dictionaryChunkCountToBeRead) {
-      dictionaryChunks.add((ColumnDictionaryChunk) dictionaryFileReader.read());
-    }
-    return dictionaryChunks;
-  }
-
-  /**
-   * This method will read the dictionary metadata file for a given column
-   * and calculate the number of chunks to be read from the dictionary file.
-   * It will do a strict validation of the start and end offsets: if the offsets do not
-   * match exactly, the thrift object will not be retrieved properly, because the data
-   * is written in thrift format.
-   *
-   * @param dictionaryChunkMetaList    list of dictionary chunk metadata
-   * @param dictionaryChunkStartOffset start offset for a dictionary chunk
-   * @param dictionaryChunkEndOffset   end offset for a dictionary chunk
-   * @return the number of dictionary chunks to be read
-   */
-  private int calculateTotalDictionaryChunkCountsToBeRead(
-      List<CarbonDictionaryColumnMetaChunk> dictionaryChunkMetaList,
-      long dictionaryChunkStartOffset, long dictionaryChunkEndOffset) {
-    boolean chunkWithStartOffsetFound = false;
-    int dictionaryChunkCount = 0;
-    for (CarbonDictionaryColumnMetaChunk metaChunk : dictionaryChunkMetaList) {
-      // find the column meta chunk whose start offset value matches
-      // with the given dictionary start offset
-      if (!chunkWithStartOffsetFound && dictionaryChunkStartOffset == metaChunk.getStart_offset()) {
-        chunkWithStartOffsetFound = true;
-      }
-      // start offset is found then keep adding the chunk count to be read
-      if (chunkWithStartOffsetFound) {
-        dictionaryChunkCount = dictionaryChunkCount + metaChunk.getChunk_count();
-      }
-      // when end offset is reached then break the loop
-      if (dictionaryChunkEndOffset == metaChunk.getEnd_offset()) {
-        break;
-      }
-    }
-    return dictionaryChunkCount;
-  }
-
-  /**
-   * This method will read dictionary metadata file and return the dictionary meta chunks
-   *
-   * @return list of dictionary metadata chunks
-   * @throws IOException read and close method throws IO exception
-   */
-  private List<CarbonDictionaryColumnMetaChunk> readDictionaryMetadataFile() throws IOException {
-    CarbonDictionaryMetadataReader columnMetadataReaderImpl = getDictionaryMetadataReader();
-    List<CarbonDictionaryColumnMetaChunk> dictionaryMetaChunkList = null;
-    // read metadata file
-    try {
-      dictionaryMetaChunkList = columnMetadataReaderImpl.read();
-    } finally {
-      // close the metadata reader
-      columnMetadataReaderImpl.close();
-    }
-    return dictionaryMetaChunkList;
-  }
-
-  /**
-   * @return carbon dictionary metadata reader instance
-   */
-  protected CarbonDictionaryMetadataReader getDictionaryMetadataReader() {
-    return new CarbonDictionaryMetadataReaderImpl(this.hdfsStorePath, carbonTableIdentifier,
-        this.columnIdentifier);
-  }
-
-  /**
-   * This method will open the dictionary file stream for reading
-   *
-   * @throws IOException thrift reader open method throws IOException
-   */
-  private void openThriftReader() throws IOException {
-    if (null == dictionaryFileReader) {
-      // initialise dictionary file reader which will return dictionary thrift object
-      // dictionary thrift object contains a list of byte buffer
-      dictionaryFileReader =
-          new ThriftReader(this.columnDictionaryFilePath, new ThriftReader.TBaseCreator() {
-            @Override public TBase create() {
-              return new ColumnDictionaryChunk();
-            }
-          });
-      // Open dictionary file reader
-      dictionaryFileReader.open();
-    }
-
-  }
-}
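
Tying the reader API to the scenarios in its Javadoc, a sketch with hypothetical identifiers and a hypothetical previously loaded end offset:

  CarbonDictionaryReader dictionaryReader =
      new CarbonDictionaryReaderImpl(storePath, tableIdentifier, columnIdentifier);
  try {
    // first-time query: read the complete dictionary
    List<byte[]> allValues = dictionaryReader.read();
    // after an incremental load: read only the newly written chunks,
    // starting from the end offset that was loaded earlier
    List<byte[]> newValues = dictionaryReader.read(previousEndOffset);
  } finally {
    dictionaryReader.close();
  }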

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/reader/CarbonFooterReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/reader/CarbonFooterReader.java b/core/src/main/java/org/carbondata/core/reader/CarbonFooterReader.java
deleted file mode 100644
index 72bcbce..0000000
--- a/core/src/main/java/org/carbondata/core/reader/CarbonFooterReader.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.reader;
-
-import java.io.IOException;
-
-import org.carbondata.format.FileFooter;
-
-import org.apache.thrift.TBase;
-
-/**
- * Reads the metadata from fact file in org.carbondata.format.FileFooter thrift object
- */
-public class CarbonFooterReader {
-
-  //Fact file path
-  private String filePath;
-
-  //From which offset of file this metadata should be read
-  private long offset;
-
-  public CarbonFooterReader(String filePath, long offset) {
-
-    this.filePath = filePath;
-    this.offset = offset;
-  }
-
-  /**
-   * It reads the metadata in FileFooter thrift object format.
-   *
-   * @return the FileFooter thrift object read from the given offset
-   * @throws IOException
-   */
-  public FileFooter readFooter() throws IOException {
-    ThriftReader thriftReader = openThriftReader(filePath);
-    thriftReader.open();
-    //Set the offset from where it should read
-    thriftReader.setReadOffset(offset);
-    FileFooter footer = (FileFooter) thriftReader.read();
-    thriftReader.close();
-    return footer;
-  }
-
-  /**
-   * Open the thrift reader
-   *
-   * @param filePath
-   * @return thrift reader instance for the given file path
-   * @throws IOException
-   */
-  private ThriftReader openThriftReader(String filePath) throws IOException {
-
-    ThriftReader thriftReader = new ThriftReader(filePath, new ThriftReader.TBaseCreator() {
-      @Override public TBase create() {
-        return new FileFooter();
-      }
-    });
-    return thriftReader;
-  }
-
-}
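
A minimal sketch of reading a fact file footer; the file path and offset are hypothetical, and the reader expects the caller to already know the footer offset:

  long footerOffset = 1024L;  // hypothetical offset supplied by the caller
  CarbonFooterReader footerReader =
      new CarbonFooterReader("/opt/carbon/store/part-0.carbondata", footerOffset);
  FileFooter footer = footerReader.readFooter();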

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/reader/CarbonIndexFileReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/reader/CarbonIndexFileReader.java b/core/src/main/java/org/carbondata/core/reader/CarbonIndexFileReader.java
deleted file mode 100644
index bb18e9f..0000000
--- a/core/src/main/java/org/carbondata/core/reader/CarbonIndexFileReader.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.reader;
-
-import java.io.IOException;
-
-import org.carbondata.format.BlockIndex;
-import org.carbondata.format.IndexHeader;
-
-import org.apache.thrift.TBase;
-
-/**
- * Reader class which will be used to read the index file
- */
-public class CarbonIndexFileReader {
-
-  /**
-   * reader
-   */
-  private ThriftReader thriftReader;
-
-  /**
-   * Below method will be used to read the index header
-   *
-   * @return index header
-   * @throws IOException if any problem occurs while reading the header
-   */
-  public IndexHeader readIndexHeader() throws IOException {
-    IndexHeader indexHeader = (IndexHeader) thriftReader.read(new ThriftReader.TBaseCreator() {
-      @Override public TBase create() {
-        return new IndexHeader();
-      }
-    });
-    return indexHeader;
-  }
-
-  /**
-   * Below method will be used to close the reader
-   */
-  public void closeThriftReader() {
-    thriftReader.close();
-  }
-
-  /**
-   * Below method will be used to read the block index from file
-   *
-   * @return block index info
-   * @throws IOException if problem while reading the block index
-   */
-  public BlockIndex readBlockIndexInfo() throws IOException {
-    BlockIndex blockInfo = (BlockIndex) thriftReader.read(new ThriftReader.TBaseCreator() {
-      @Override public TBase create() {
-        return new BlockIndex();
-      }
-    });
-    return blockInfo;
-  }
-
-  /**
-   * Open the thrift reader
-   *
-   * @param filePath
-   * @throws IOException
-   */
-  public void openThriftReader(String filePath) throws IOException {
-    thriftReader = new ThriftReader(filePath);
-    thriftReader.open();
-  }
-
-  /**
-   * check if any more object is present
-   *
-   * @return true if any more object can be read
-   * @throws IOException
-   */
-  public boolean hasNext() throws IOException {
-    return thriftReader.hasNext();
-  }
-}
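
A usage sketch for the index file reader; the index file path is a hypothetical placeholder, and it is assumed, as the API suggests, that the index header precedes the block index entries:

  CarbonIndexFileReader indexReader = new CarbonIndexFileReader();
  indexReader.openThriftReader("/opt/carbon/store/0.carbonindex");  // hypothetical path
  try {
    IndexHeader header = indexReader.readIndexHeader();
    while (indexReader.hasNext()) {
      BlockIndex blockIndex = indexReader.readBlockIndexInfo();
      // process one block index entry per iteration
    }
  } finally {
    indexReader.closeThriftReader();
  }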

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/reader/ThriftReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/reader/ThriftReader.java b/core/src/main/java/org/carbondata/core/reader/ThriftReader.java
deleted file mode 100644
index 92a6be1..0000000
--- a/core/src/main/java/org/carbondata/core/reader/ThriftReader.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.reader;
-
-import java.io.DataInputStream;
-import java.io.IOException;
-
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-import org.carbondata.core.util.CarbonUtil;
-
-import org.apache.thrift.TBase;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.transport.TIOStreamTransport;
-
-/**
- * A simple class for reading Thrift objects (of a single type) from a file.
- */
-public class ThriftReader {
-  /**
-   * buffer size
-   */
-  private static final int bufferSize = 2048;
-  /**
-   * File containing the objects.
-   */
-  private String fileName;
-  /**
-   * Used to create empty objects that will be initialized with values from the file.
-   */
-  private TBaseCreator creator;
-  /**
-   * For reading the fileName.
-   */
-  private DataInputStream dataInputStream;
-  /**
-   * For reading the binary thrift objects.
-   */
-  private TProtocol binaryIn;
-
-  /**
-   * Constructor.
-   */
-  public ThriftReader(String fileName, TBaseCreator creator) {
-    this.fileName = fileName;
-    this.creator = creator;
-  }
-
-  /**
-   * Constructor.
-   */
-  public ThriftReader(String fileName) {
-    this.fileName = fileName;
-  }
-
-  /**
-   * Opens the file for reading.
-   */
-  public void open() throws IOException {
-    FileFactory.FileType fileType = FileFactory.getFileType(fileName);
-    dataInputStream = FileFactory.getDataInputStream(fileName, fileType, bufferSize);
-    binaryIn = new TCompactProtocol(new TIOStreamTransport(dataInputStream));
-  }
-
-  /**
-   * This method will set the position of the stream from where data has to be read
-   */
-  public void setReadOffset(long bytesToSkip) throws IOException {
-    if (dataInputStream.skip(bytesToSkip) != bytesToSkip) {
-      throw new IOException("It doesn't set the offset properly");
-    }
-  }
-
-  /**
-   * Checks if another object is available by attempting to read another byte from the stream.
-   */
-  public boolean hasNext() throws IOException {
-    dataInputStream.mark(1);
-    int val = dataInputStream.read();
-    dataInputStream.reset();
-    return val != -1;
-  }
-
-  /**
-   * Reads the next object from the file.
-   */
-  public TBase read() throws IOException {
-    TBase t = creator.create();
-    try {
-      t.read(binaryIn);
-    } catch (TException e) {
-      throw new IOException(e);
-    }
-    return t;
-  }
-
-  /**
-   * Reads the next object from the file.
-   *
-   * @param creator type of object which will be returned
-   * @throws IOException any problem while reading
-   */
-  public TBase read(TBaseCreator creator) throws IOException {
-    TBase t = creator.create();
-    try {
-      t.read(binaryIn);
-    } catch (TException e) {
-      throw new IOException(e);
-    }
-    return t;
-  }
-
-  /**
-   * Closes the file.
-   */
-  public void close() {
-    CarbonUtil.closeStreams(dataInputStream);
-  }
-
-  /**
-   * Thrift deserializes by taking an existing object and populating it. ThriftReader
-   * needs a way of obtaining instances of the class to be populated and this interface
-   * defines the mechanism by which a client provides these instances.
-   */
-  public static interface TBaseCreator {
-    TBase create();
-  }
-}
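
A sketch of the reader's intended usage pattern, mirroring how the dictionary readers above drive it; the file name is a hypothetical placeholder:

  ThriftReader reader = new ThriftReader("/store/1234.dict",  // hypothetical file
      new ThriftReader.TBaseCreator() {
        @Override public TBase create() {
          return new ColumnDictionaryChunk();  // any generated thrift type works
        }
      });
  reader.open();
  while (reader.hasNext()) {
    ColumnDictionaryChunk chunk = (ColumnDictionaryChunk) reader.read();
    // consume one thrift object per iteration
  }
  reader.close();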

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReader.java b/core/src/main/java/org/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReader.java
deleted file mode 100644
index a07c0d4..0000000
--- a/core/src/main/java/org/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReader.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.reader.sortindex;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Interface for reading the dictionary sort index and inverted sort index
- */
-public interface CarbonDictionarySortIndexReader extends Closeable {
-
-  /**
-   * method for reading the carbon dictionary sort index data
-   * from the column's sortIndex file.
-   *
-   * @return the list of dictionary sort index and sort index reverse
-   * @throws IOException In case any I/O error occurs
-   */
-  public List<Integer> readSortIndex() throws IOException;
-
-  /**
-   * method for reading the carbon dictionary inverted sort index data
-   * from the column's sortIndex file.
-   *
-   * @return the list of dictionary inverted sort index
-   * @throws IOException In case any I/O error occurs
-   */
-  public List<Integer> readInvertedSortIndex() throws IOException;
-}
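
A short sketch of using this interface together with the implementation that follows; the identifiers and store path are hypothetical placeholders:

  CarbonDictionarySortIndexReader sortIndexReader =
      new CarbonDictionarySortIndexReaderImpl(tableIdentifier, columnIdentifier, storePath);
  try {
    // the sort index and its inverse, as described in the method Javadoc
    List<Integer> sortIndex = sortIndexReader.readSortIndex();
    List<Integer> invertedSortIndex = sortIndexReader.readInvertedSortIndex();
  } finally {
    sortIndexReader.close();
  }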

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReaderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReaderImpl.java b/core/src/main/java/org/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReaderImpl.java
deleted file mode 100644
index 3ec6e7e..0000000
--- a/core/src/main/java/org/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReaderImpl.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.reader.sortindex;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.carbondata.common.factory.CarbonCommonFactory;
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.carbon.path.CarbonTablePath;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-import org.carbondata.core.reader.CarbonDictionaryColumnMetaChunk;
-import org.carbondata.core.reader.CarbonDictionaryMetadataReader;
-import org.carbondata.core.reader.CarbonDictionaryMetadataReaderImpl;
-import org.carbondata.core.reader.ThriftReader;
-import org.carbondata.core.service.PathService;
-import org.carbondata.format.ColumnSortInfo;
-
-import org.apache.thrift.TBase;
-
-/**
- * Implementation for reading the dictionary sort index and inverted sort index.
- */
-public class CarbonDictionarySortIndexReaderImpl implements CarbonDictionarySortIndexReader {
-
-  /**
-   * carbonTable Identifier holding the info of databaseName and tableName
-   */
-  protected CarbonTableIdentifier carbonTableIdentifier;
-
-  /**
-   * column name
-   */
-  protected ColumnIdentifier columnIdentifier;
-
-  /**
-   * hdfs store location
-   */
-  protected String carbonStorePath;
-
-  /**
-   * the path of the dictionary Sort Index file
-   */
-  protected String sortIndexFilePath;
-
-  /**
-   * Column sort info thrift instance.
-   */
-  ColumnSortInfo columnSortInfo = null;
-
-  /**
-   * LOGGER instance
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(CarbonDictionarySortIndexReaderImpl.class.getName());
-
-  /**
-   * dictionary sortIndex file Reader
-   */
-  private ThriftReader dictionarySortIndexThriftReader;
-
-  /**
-   * @param carbonTableIdentifier Carbon Table identifier holding the database name and table name
-   * @param columnIdentifier      column identifier
-   * @param carbonStorePath       carbon store path
-   */
-  public CarbonDictionarySortIndexReaderImpl(final CarbonTableIdentifier carbonTableIdentifier,
-      final ColumnIdentifier columnIdentifier, final String carbonStorePath) {
-    this.carbonTableIdentifier = carbonTableIdentifier;
-    this.columnIdentifier = columnIdentifier;
-    this.carbonStorePath = carbonStorePath;
-  }
-
-  /**
-   * Reads the carbon dictionary sort index data
-   * from the column's sortIndex file.
-   *
-   * @return the list of dictionary sort index and sort index reverse values;
-   * an empty list is returned when the column has no members
-   * @throws IOException In case any I/O error occurs
-   */
-  @Override public List<Integer> readSortIndex() throws IOException {
-    if (null == columnSortInfo) {
-      readColumnSortInfo();
-    }
-    return columnSortInfo.getSort_index();
-  }
-
-  /**
-   * Reads the column sort info (sort index and inverted sort index)
-   * from the column's sortIndex file.
-   * Produces an empty list when the column has no members.
-   *
-   * @throws IOException In case any I/O error occurs
-   */
-  private void readColumnSortInfo() throws IOException {
-    init();
-    try {
-      columnSortInfo = (ColumnSortInfo) dictionarySortIndexThriftReader.read();
-    } catch (IOException ie) {
-      LOGGER.error(ie,
-          "problem while reading the column sort info.");
-      throw new IOException("problem while reading the column sort info.", ie);
-    } finally {
-      if (null != dictionarySortIndexThriftReader) {
-        dictionarySortIndexThriftReader.close();
-      }
-    }
-  }
-
-  /**
-   * Reads the carbon dictionary inverted sort index data
-   * from the column's sortIndex file.
-   *
-   * @return the list of dictionary inverted sort index values
-   * @throws IOException In case any I/O error occurs
-   */
-  @Override public List<Integer> readInvertedSortIndex() throws IOException {
-    if (null == columnSortInfo) {
-      readColumnSortInfo();
-    }
-    return columnSortInfo.getSort_index_inverted();
-  }
-
-  /**
-   * Initializes the dictionary sort index file path
-   * and opens the thrift reader for the dictionary sortIndex file.
-   *
-   * @throws IOException if any I/O error occurs
-   */
-  private void init() throws IOException {
-    initPath();
-    openThriftReader();
-  }
-
-  protected void initPath() {
-    PathService pathService = CarbonCommonFactory.getPathService();
-    CarbonTablePath carbonTablePath =
-        pathService.getCarbonTablePath(columnIdentifier, carbonStorePath, carbonTableIdentifier);
-    try {
-      CarbonDictionaryColumnMetaChunk chunkMetaObjectForLastSegmentEntry =
-          getChunkMetaObjectForLastSegmentEntry();
-      long dictOffset = chunkMetaObjectForLastSegmentEntry.getEnd_offset();
-      this.sortIndexFilePath =
-          carbonTablePath.getSortIndexFilePath(columnIdentifier.getColumnId(), dictOffset);
-      if (!FileFactory
-          .isFileExist(this.sortIndexFilePath, FileFactory.getFileType(this.sortIndexFilePath))) {
-        this.sortIndexFilePath =
-            carbonTablePath.getSortIndexFilePath(columnIdentifier.getColumnId());
-      }
-    } catch (IOException e) {
-      this.sortIndexFilePath = carbonTablePath.getSortIndexFilePath(columnIdentifier.getColumnId());
-    }
-
-  }
-
-  /**
-   * This method will read the dictionary chunk metadata thrift object for last entry
-   *
-   * @return last entry of dictionary meta chunk
-   * @throws IOException if an I/O error occurs
-   */
-  private CarbonDictionaryColumnMetaChunk getChunkMetaObjectForLastSegmentEntry()
-      throws IOException {
-    CarbonDictionaryMetadataReader columnMetadataReaderImpl = getDictionaryMetadataReader();
-    try {
-      // read the last segment entry for dictionary metadata
-      return columnMetadataReaderImpl.readLastEntryOfDictionaryMetaChunk();
-    } finally {
-      // Close metadata reader
-      columnMetadataReaderImpl.close();
-    }
-  }
-
-  /**
-   * @return the dictionary metadata reader used to locate the latest sort index file
-   */
-  protected CarbonDictionaryMetadataReader getDictionaryMetadataReader() {
-    return new CarbonDictionaryMetadataReaderImpl(carbonStorePath, carbonTableIdentifier,
-        columnIdentifier);
-  }
-
-  /**
-   * This method will open the dictionary sort index file stream for reading
-   *
-   * @throws IOException in case any I/O error occurs
-   */
-  private void openThriftReader() throws IOException {
-    this.dictionarySortIndexThriftReader =
-        new ThriftReader(this.sortIndexFilePath, new ThriftReader.TBaseCreator() {
-          @Override public TBase create() {
-            return new ColumnSortInfo();
-          }
-        });
-    dictionarySortIndexThriftReader.open();
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated
-   * with it. If the stream is already closed then invoking this
-   * method has no effect.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override public void close() throws IOException {
-    if (null != dictionarySortIndexThriftReader) {
-      dictionarySortIndexThriftReader.close();
-    }
-  }
-}
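
A construction sketch for the implementation above; the identifier
arguments and the store path are illustrative assumptions, not values from
this commit:

    CarbonTableIdentifier tableId =
        new CarbonTableIdentifier("default", "sales", "1"); // db, table, table id (assumed)
    ColumnIdentifier columnId = new ColumnIdentifier("c1", null, null); // assumed signature
    CarbonDictionarySortIndexReader reader =
        new CarbonDictionarySortIndexReaderImpl(tableId, columnId, "/opt/carbonstore");
    try {
      List<Integer> sortIndex = reader.readSortIndex(); // empty when the column has no members
      List<Integer> inverted = reader.readInvertedSortIndex();
    } finally {
      reader.close();
    }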

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/service/ColumnUniqueIdService.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/service/ColumnUniqueIdService.java b/core/src/main/java/org/carbondata/core/service/ColumnUniqueIdService.java
deleted file mode 100644
index e2e0807..0000000
--- a/core/src/main/java/org/carbondata/core/service/ColumnUniqueIdService.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.service;
-
-import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
-
-/**
- * Column Unique id generator
- */
-public interface ColumnUniqueIdService {
-
-  /**
-   * @param databaseName database name
-   * @param columnSchema column schema
-   * @return generated unique id for the column
-   */
-  public String generateUniqueId(String databaseName, ColumnSchema columnSchema);
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/service/DictionaryService.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/service/DictionaryService.java b/core/src/main/java/org/carbondata/core/service/DictionaryService.java
deleted file mode 100644
index 97afe29..0000000
--- a/core/src/main/java/org/carbondata/core/service/DictionaryService.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.service;
-
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.reader.CarbonDictionaryMetadataReader;
-import org.carbondata.core.reader.CarbonDictionaryReader;
-import org.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReader;
-import org.carbondata.core.writer.CarbonDictionaryWriter;
-import org.carbondata.core.writer.sortindex.CarbonDictionarySortIndexWriter;
-
-/**
- * Dictionary service to get writer and reader
- */
-public interface DictionaryService {
-
-  /**
-   * get dictionary writer
-   *
-   * @param carbonTableIdentifier table identifier
-   * @param columnIdentifier      column identifier
-   * @param carbonStorePath       carbon store path
-   * @return dictionary writer
-   */
-  public CarbonDictionaryWriter getDictionaryWriter(CarbonTableIdentifier carbonTableIdentifier,
-      ColumnIdentifier columnIdentifier, String carbonStorePath);
-
-  /**
-   * get dictionary sort index writer
-   *
-   * @param carbonTableIdentifier table identifier
-   * @param columnIdentifier      column identifier
-   * @param carbonStorePath       carbon store path
-   * @return dictionary sort index writer
-   */
-  public CarbonDictionarySortIndexWriter getDictionarySortIndexWriter(
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
-      String carbonStorePath);
-
-  /**
-   * get dictionary metadata reader
-   *
-   * @param carbonTableIdentifier table identifier
-   * @param columnIdentifier      column identifier
-   * @param carbonStorePath       carbon store path
-   * @return dictionary metadata reader
-   */
-  public CarbonDictionaryMetadataReader getDictionaryMetadataReader(
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
-      String carbonStorePath);
-
-  /**
-   * get dictionary reader
-   *
-   * @param carbonTableIdentifier table identifier
-   * @param columnIdentifier      column identifier
-   * @param carbonStorePath       carbon store path
-   * @return dictionary reader
-   */
-  public CarbonDictionaryReader getDictionaryReader(CarbonTableIdentifier carbonTableIdentifier,
-      ColumnIdentifier columnIdentifier, String carbonStorePath);
-
-  /**
-   * get dictionary sort index reader
-   *
-   * @param carbonTableIdentifier table identifier
-   * @param columnIdentifier      column identifier
-   * @param carbonStorePath       carbon store path
-   * @return dictionary sort index reader
-   */
-  public CarbonDictionarySortIndexReader getDictionarySortIndexReader(
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
-      String carbonStorePath);
-
-}
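
The service exists so that load and query code can obtain dictionary
readers and writers without naming concrete classes. A sketch, assuming the
service instance is obtained from a factory (the accessor below is an
assumption):

    DictionaryService service = CarbonCommonFactory.getDictionaryService(); // assumed accessor
    CarbonDictionaryWriter writer =
        service.getDictionaryWriter(tableId, columnId, storePath);
    CarbonDictionarySortIndexReader sortIndexReader =
        service.getDictionarySortIndexReader(tableId, columnId, storePath);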

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/service/PathService.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/service/PathService.java b/core/src/main/java/org/carbondata/core/service/PathService.java
deleted file mode 100644
index 7ef3a45..0000000
--- a/core/src/main/java/org/carbondata/core/service/PathService.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.service;
-
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.carbon.path.CarbonTablePath;
-
-/**
- * Create helper to get path details
- */
-public interface PathService {
-
-  /**
-   * @param columnIdentifier column identifier
-   * @param storeLocation    store location
-   * @param tableIdentifier  table identifier
-   * @return table path helper for the given store location and table
-   */
-  CarbonTablePath getCarbonTablePath(ColumnIdentifier columnIdentifier, String storeLocation,
-      CarbonTableIdentifier tableIdentifier);
-}
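
This mirrors how initPath() in the sort index reader above resolves file
locations; a sketch using the same calls:

    PathService pathService = CarbonCommonFactory.getPathService();
    CarbonTablePath tablePath =
        pathService.getCarbonTablePath(columnId, storeLocation, tableId);
    String sortIndexFile = tablePath.getSortIndexFilePath(columnId.getColumnId());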

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/ByteUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/ByteUtil.java b/core/src/main/java/org/carbondata/core/util/ByteUtil.java
deleted file mode 100644
index 1f7b8d6..0000000
--- a/core/src/main/java/org/carbondata/core/util/ByteUtil.java
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.util;
-
-import java.lang.reflect.Field;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-
-/**
- * Util class for byte comparison
- */
-public final class ByteUtil {
-
-  private static final int SIZEOF_LONG = 8;
-
-  private ByteUtil() {
-
-  }
-
-  /**
-   * Compare two byte arrays, byte by byte.
-   *
-   * @param buffer1 first array
-   * @param buffer2 second array
-   * @return difference of the first non-matching bytes, or 0 if none is found
-   */
-  public static int compare(byte[] buffer1, byte[] buffer2) {
-    // Short circuit equal case
-    if (buffer1 == buffer2) {
-      return 0;
-    }
-    // Bring WritableComparator code local
-    int i = 0;
-    int j = 0;
-    for (; i < buffer1.length && j < buffer2.length; i++, j++) {
-      int a = (buffer1[i] & 0xff);
-      int b = (buffer2[j] & 0xff);
-      if (a != b) {
-        return a - b;
-      }
-    }
-    return 0;
-  }
-
-  /**
-   * convert a long[] to an int[] (each value is narrowed with a cast)
-   *
-   * @param longArray source array
-   * @return int array of the same length
-   */
-  public static int[] convertToIntArray(long[] longArray) {
-    int[] intArray = new int[longArray.length];
-    for (int i = 0; i < longArray.length; i++) {
-      intArray[i] = (int) longArray[i];
-
-    }
-    return intArray;
-  }
-
-  /**
-   * Unsafe comparator
-   */
-  public enum UnsafeComparer {
-    /**
-     * instance.
-     */
-    INSTANCE;
-
-    /**
-     * unsafe .
-     */
-    static final sun.misc.Unsafe THEUNSAFE;
-
-    /**
-     * The offset to the first element in a byte array.
-     */
-    static final int BYTE_ARRAY_BASE_OFFSET;
-    static final boolean LITTLEENDIAN = ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN);
-
-    static {
-      THEUNSAFE = (sun.misc.Unsafe) AccessController.doPrivileged(new PrivilegedAction<Object>() {
-        @Override public Object run() {
-          try {
-            Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe");
-            f.setAccessible(true);
-            return f.get(null);
-          } catch (NoSuchFieldException e) {
-            // It doesn't matter what we throw;
-            // it's swallowed in getBestComparer().
-            throw new Error();
-          } catch (IllegalAccessException e) {
-            throw new Error();
-          }
-        }
-      });
-
-      BYTE_ARRAY_BASE_OFFSET = THEUNSAFE.arrayBaseOffset(byte[].class);
-
-      // sanity check - this should never fail
-      if (THEUNSAFE.arrayIndexScale(byte[].class) != 1) {
-        throw new AssertionError();
-      }
-
-    }
-
-    /**
-     * Returns true if x1 is less than x2, when both values are treated as
-     * unsigned.
-     */
-    static boolean lessThanUnsigned(long x1, long x2) {
-      return (x1 + Long.MIN_VALUE) < (x2 + Long.MIN_VALUE);
-    }
-
-    /**
-     * Lexicographically compare two arrays.
-     *
-     * @param buffer1 left operand
-     * @param buffer2 right operand
-     * @param offset1 Where to start comparing in the left buffer
-     * @param offset2 Where to start comparing in the right buffer
-     * @param length1 How much to compare from the left buffer
-     * @param length2 How much to compare from the right buffer
-     * @return 0 if equal, < 0 if left is less than right, etc.
-     */
-    public int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2,
-        int length2) {
-      // Short circuit equal case
-      if (buffer1 == buffer2 && offset1 == offset2 && length1 == length2) {
-        return 0;
-      }
-      int minLength = Math.min(length1, length2);
-      int minWords = minLength / SIZEOF_LONG;
-      int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET;
-      int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET;
-
-      /*
-       * Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes
-       * at a time is no slower than comparing 4 bytes at a time even on
-       * 32-bit. On the other hand, it is substantially faster on 64-bit.
-       */
-      for (int i = 0; i < minWords * SIZEOF_LONG; i += SIZEOF_LONG) {
-        long lw = THEUNSAFE.getLong(buffer1, offset1Adj + (long) i);
-        long rw = THEUNSAFE.getLong(buffer2, offset2Adj + (long) i);
-        long diff = lw ^ rw;
-
-        if (diff != 0) {
-          if (!LITTLEENDIAN) {
-            return lessThanUnsigned(lw, rw) ? -1 : 1;
-          }
-
-          // Use binary search
-          int n = 0;
-          int y;
-          int x = (int) diff;
-          if (x == 0) {
-            x = (int) (diff >>> 32);
-            n = 32;
-          }
-
-          y = x << 16;
-          if (y == 0) {
-            n += 16;
-          } else {
-            x = y;
-          }
-
-          y = x << 8;
-          if (y == 0) {
-            n += 8;
-          }
-          return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL));
-        }
-      }
-
-      // The epilogue to cover the last (minLength % 8) elements.
-      for (int i = minWords * SIZEOF_LONG; i < minLength; i++) {
-        int a = (buffer1[offset1 + i] & 0xff);
-        int b = (buffer2[offset2 + i] & 0xff);
-        if (a != b) {
-          return a - b;
-        }
-      }
-      return length1 - length2;
-    }
-
-    public int compareTo(byte[] buffer1, byte[] buffer2) {
-
-      // Short circuit equal case
-      if (buffer1 == buffer2) {
-        return 0;
-      }
-      int len1 = buffer1.length;
-      int len2 = buffer2.length;
-      int minLength = (len1 <= len2) ? len1 : len2;
-      int minWords = 0;
-
-      /*
-       * Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes
-       * at a time is no slower than comparing 4 bytes at a time even on
-       * 32-bit. On the other hand, it is substantially faster on 64-bit.
-       */
-      if (minLength > 7) {
-        minWords = minLength / SIZEOF_LONG;
-        for (int i = 0; i < minWords * SIZEOF_LONG; i += SIZEOF_LONG) {
-          long lw = THEUNSAFE.getLong(buffer1, BYTE_ARRAY_BASE_OFFSET + (long) i);
-          long rw = THEUNSAFE.getLong(buffer2, BYTE_ARRAY_BASE_OFFSET + (long) i);
-          long diff = lw ^ rw;
-
-          if (diff != 0) {
-            if (!LITTLEENDIAN) {
-              return lessThanUnsigned(lw, rw) ? -1 : 1;
-            }
-
-            // Use binary search
-            int k = 0;
-            int y;
-            int x = (int) diff;
-            if (x == 0) {
-              x = (int) (diff >>> 32);
-              k = 32;
-            }
-            y = x << 16;
-            if (y == 0) {
-              k += 16;
-            } else {
-              x = y;
-            }
-
-            y = x << 8;
-            if (y == 0) {
-              k += 8;
-            }
-            return (int) (((lw >>> k) & 0xFFL) - ((rw >>> k) & 0xFFL));
-          }
-        }
-      }
-
-      // The epilogue to cover the last (minLength % 8) elements.
-      for (int i = minWords * SIZEOF_LONG; i < minLength; i++) {
-        int a = (buffer1[i] & 0xff);
-        int b = (buffer2[i] & 0xff);
-        if (a != b) {
-          return a - b;
-        }
-      }
-      return len1 - len2;
-    }
-
-    public boolean equals(byte[] buffer1, byte[] buffer2) {
-      if (buffer1.length != buffer2.length) {
-        return false;
-      }
-      int len = buffer1.length / 8;
-      long currentOffset = BYTE_ARRAY_BASE_OFFSET;
-      for (int i = 0; i < len; i++) {
-        long lw = THEUNSAFE.getLong(buffer1, currentOffset);
-        long rw = THEUNSAFE.getLong(buffer2, currentOffset);
-        if (lw != rw) {
-          return false;
-        }
-        currentOffset += 8;
-      }
-      len = buffer1.length % 8;
-      if (len > 0) {
-        for (int i = 0; i < len; i += 1) {
-          long lw = THEUNSAFE.getByte(buffer1, currentOffset);
-          long rw = THEUNSAFE.getByte(buffer2, currentOffset);
-          if (lw != rw) {
-            return false;
-          }
-          currentOffset += 1;
-        }
-      }
-      return true;
-    }
-
-    /**
-     * Compares two byte buffers; used in the data load sorting step.
-     * Note: this call consumes the remaining bytes of both buffers.
-     * @param byteBuffer1 first buffer
-     * @param byteBuffer2 second buffer
-     * @return comparison result, as for compareTo(byte[], byte[])
-     */
-    public int compareTo(ByteBuffer byteBuffer1, ByteBuffer byteBuffer2) {
-
-      // Short circuit equal case
-      if (byteBuffer1 == byteBuffer2) {
-        return 0;
-      }
-      int len1 = byteBuffer1.remaining();
-      int len2 = byteBuffer2.remaining();
-      byte[] buffer1 = new byte[len1];
-      byte[] buffer2 = new byte[len2];
-      byteBuffer1.get(buffer1);
-      byteBuffer2.get(buffer2);
-      return compareTo(buffer1, buffer2);
-    }
-
-  }
-
-}
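
A behavior sketch for the comparer (values chosen for illustration):

    byte[] a = {1, 2, 3};
    byte[] b = {1, 2, 4};
    int cmp = ByteUtil.UnsafeComparer.INSTANCE.compareTo(a, b);    // negative: a sorts first
    boolean eq = ByteUtil.UnsafeComparer.INSTANCE.equals(a, b);    // false: last byte differs
    int pre = ByteUtil.UnsafeComparer.INSTANCE.compareTo(a, 0, 2, b, 0, 2); // 0: equal prefix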

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/CarbonFileFolderComparator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/CarbonFileFolderComparator.java b/core/src/main/java/org/carbondata/core/util/CarbonFileFolderComparator.java
deleted file mode 100644
index 307a4e0..0000000
--- a/core/src/main/java/org/carbondata/core/util/CarbonFileFolderComparator.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.util;
-
-import java.util.Comparator;
-
-import org.carbondata.core.datastorage.store.filesystem.CarbonFile;
-
-public class CarbonFileFolderComparator implements Comparator<CarbonFile> {
-
-  /**
-   * Compares two files by the numeric suffix after the last '_' in
-   * their names, so that e.g. Part_2 orders before Part_10.
-   * @param o1 first file
-   * @param o2 second file
-   * @return compare result
-   */
-  @Override public int compare(CarbonFile o1, CarbonFile o2) {
-    String firstFileName = o1.getName();
-    String secondFileName = o2.getName();
-    int lastIndexOfO1 = firstFileName.lastIndexOf('_');
-    int lastIndexOfO2 = secondFileName.lastIndexOf('_');
-    int file1 = 0;
-    int file2 = 0;
-
-    try {
-      file1 = Integer.parseInt(firstFileName.substring(lastIndexOfO1 + 1));
-      file2 = Integer.parseInt(secondFileName.substring(lastIndexOfO2 + 1));
-    } catch (NumberFormatException e) {
-      return -1;
-    }
-    return (file1 < file2) ? -1 : (file1 == file2 ? 0 : 1);
-  }
-}
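
A sketch of the intended ordering: suffixes compare numerically rather
than lexically, so Part_2 precedes Part_10 (the listing call is an assumed
CarbonFile API usage):

    CarbonFile[] segmentFiles = storeFolder.listFiles(); // assumed listing call
    java.util.Arrays.sort(segmentFiles, new CarbonFileFolderComparator());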

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/CarbonLoadStatisticsDummy.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/CarbonLoadStatisticsDummy.java b/core/src/main/java/org/carbondata/core/util/CarbonLoadStatisticsDummy.java
deleted file mode 100644
index bb82fcd..0000000
--- a/core/src/main/java/org/carbondata/core/util/CarbonLoadStatisticsDummy.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.carbondata.core.util;
-
-public class CarbonLoadStatisticsDummy implements LoadStatistics {
-  private CarbonLoadStatisticsDummy() {
-
-  }
-
-  private static CarbonLoadStatisticsDummy carbonLoadStatisticsDummyInstance =
-      new CarbonLoadStatisticsDummy();
-
-  public static CarbonLoadStatisticsDummy getInstance() {
-    return carbonLoadStatisticsDummyInstance;
-  }
-
-  @Override
-  public void  initPartitonInfo(String PartitionId) {
-
-  }
-
-  @Override
-  public void recordDicShuffleAndWriteTime() {
-
-  }
-
-  @Override
-  public void recordLoadCsvfilesToDfTime() {
-
-  }
-
-  @Override
-  public void recordDictionaryValuesTotalTime(String partitionID,
-      Long dictionaryValuesTotalTimeTimePoint) {
-
-  }
-
-  @Override
-  public void recordCsvInputStepTime(String partitionID, Long csvInputStepTimePoint) {
-
-  }
-
-  @Override
-  public void recordLruCacheLoadTime(double lruCacheLoadTime) {
-
-  }
-
-  @Override
-  public void recordGeneratingDictionaryValuesTime(String partitionID,
-      Long generatingDictionaryValuesTimePoint) {
-
-  }
-
-  @Override
-  public void recordSortRowsStepTotalTime(String partitionID, Long sortRowsStepTotalTimePoint) {
-
-  }
-
-  @Override
-  public void recordMdkGenerateTotalTime(String partitionID, Long mdkGenerateTotalTimePoint) {
-
-  }
-
-  @Override
-  public void recordDictionaryValue2MdkAdd2FileTime(String partitionID,
-      Long dictionaryValue2MdkAdd2FileTimePoint) {
-
-  }
-
-  @Override
-  public void recordTotalRecords(long totalRecords) {
-
-  }
-
-  @Override
-  public void recordHostBlockMap(String host, Integer numBlocks) {
-
-  }
-
-  @Override
-  public void recordPartitionBlockMap(String partitionID, Integer numBlocks) {
-
-  }
-
-  @Override
-  public void printStatisticsInfo(String partitionID) {
-
-  }
-}
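
The dummy is a null-object: callers record load statistics unconditionally
and configuration decides whether a real recorder or this no-op singleton
is wired in. A sketch (the real counterpart class and the flag are
assumptions):

    LoadStatistics stats = statisticsEnabled
        ? CarbonLoadStatisticsImpl.getInstance()   // assumed real recorder
        : CarbonLoadStatisticsDummy.getInstance(); // every call is a no-op
    stats.recordTotalRecords(1000000L);            // safe either way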



[41/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalDefault.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalDefault.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalDefault.java
new file mode 100644
index 0000000..9146705
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalDefault.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressNonDecimalDefault
+    implements ValueCompressonHolder.UnCompressValue<double[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNonDecimalDefault.class.getName());
+  /**
+   * doubleCompressor.
+   */
+  private static Compressor<double[]> doubleCompressor =
+      SnappyCompression.SnappyDoubleCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private double[] value;
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException cnse1) {
+      LOGGER.error(cnse1, cnse1.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressNonDecimalByte byte1 = new UnCompressNonDecimalByte();
+    byte1.setValue(doubleCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
+    return null;
+  }
+
+  @Override public void setValue(double[] value) {
+    this.value = value;
+
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToDoubleArray(buffer, value.length);
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressNonDecimalByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double[] dblVals = new double[value.length];
+    for (int i = 0; i < dblVals.length; i++) {
+      dblVals[i] = value[i] / Math.pow(10, decimal);
+    }
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    dataHolder.setReadableDoubleValues(dblVals);
+    return dataHolder;
+  }
+
+}
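
The getValues() scaling above works because non-decimal measures are stored
multiplied by 10^decimal, so reading divides the factor back out. A worked
example:

    double[] stored = {12345d, 700d}; // written as value * 10^2
    int decimal = 2;
    for (double v : stored) {
      System.out.println(v / Math.pow(10, decimal)); // prints 123.45 then 7.0
    }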

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalFloat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalFloat.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalFloat.java
new file mode 100644
index 0000000..487e943
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalFloat.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public class UnCompressNonDecimalFloat implements ValueCompressonHolder.UnCompressValue<float[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNonDecimalFloat.class.getName());
+  /**
+   * floatCompressor
+   */
+  private static Compressor<float[]> floatCompressor =
+      SnappyCompression.SnappyFloatCompression.INSTANCE;
+  /**
+   * value.
+   */
+
+  private float[] value;
+
+  @Override public void setValue(float[] value) {
+    this.value = value;
+
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException cnsexception) {
+      LOGGER
+          .error(cnsexception, cnsexception.getMessage());
+    }
+    return null;
+  }
+
+  public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressNonDecimalByte byte1 = new UnCompressNonDecimalByte();
+    byte1.setValue(floatCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override
+  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
+    return null;
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToFloatArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressNonDecimalByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double[] vals = new double[value.length];
+    for (int m = 0; m < vals.length; m++) {
+      vals[m] = value[m] / Math.pow(10, decimal);
+    }
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    dataHolder.setReadableDoubleValues(vals);
+    return dataHolder;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalInt.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalInt.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalInt.java
new file mode 100644
index 0000000..0e0c2b9
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalInt.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressNonDecimalInt implements UnCompressValue<int[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNonDecimalInt.class.getName());
+  /**
+   * intCompressor.
+   */
+  private static Compressor<int[]> intCompressor = SnappyCompression.SnappyIntCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private int[] value;
+
+  @Override public void setValue(int[] value) {
+    this.value = value;
+
+  }
+
+  @Override public UnCompressValue getNew() {
+    try {
+      return (UnCompressValue) clone();
+    } catch (CloneNotSupportedException csne1) {
+      LOGGER.error(csne1, csne1.getMessage());
+    }
+    return null;
+  }
+
+  @Override public UnCompressValue compress() {
+    UnCompressNonDecimalByte byte1 = new UnCompressNonDecimalByte();
+    byte1.setValue(intCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public void setValueInBytes(byte[] bytesArr) {
+    ByteBuffer buffer = ByteBuffer.wrap(bytesArr);
+    this.value = ValueCompressionUtil.convertToIntArray(buffer, bytesArr.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public UnCompressValue getCompressorObject() {
+    return new UnCompressNonDecimalByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double[] vals = new double[value.length];
+    for (int k = 0; k < vals.length; k++) {
+      vals[k] = value[k] / Math.pow(10, decimal);
+    }
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    dataHolder.setReadableDoubleValues(vals);
+    return dataHolder;
+  }
+
+  @Override public UnCompressValue uncompress(DataType dataType) {
+    return null;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalLong.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalLong.java
new file mode 100644
index 0000000..3fb817e
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalLong.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressNonDecimalLong implements UnCompressValue<long[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNonDecimalLong.class.getName());
+
+  /**
+   * longCompressor.
+   */
+  private static Compressor<long[]> longCompressor =
+      SnappyCompression.SnappyLongCompression.INSTANCE;
+
+  /**
+   * value.
+   */
+  private long[] value;
+
+  @Override public void setValue(long[] value) {
+    this.value = value;
+  }
+
+  @Override public UnCompressValue compress() {
+    UnCompressNonDecimalByte byte1 = new UnCompressNonDecimalByte();
+    byte1.setValue(longCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public UnCompressValue getNew() {
+    try {
+      return (UnCompressValue) clone();
+    } catch (CloneNotSupportedException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return null;
+  }
+
+  @Override public UnCompressValue uncompress(DataType dataType) {
+    return null;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public void setValueInBytes(byte[] bytes) {
+    ByteBuffer buffer = ByteBuffer.wrap(bytes);
+    this.value = ValueCompressionUtil.convertToLongArray(buffer, bytes.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public UnCompressValue getCompressorObject() {
+    return new UnCompressNonDecimalByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double[] vals = new double[value.length];
+    for (int i = 0; i < vals.length; i++) {
+      vals[i] = value[i] / Math.pow(10, decimal);
+    }
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    dataHolder.setReadableDoubleValues(vals);
+    return dataHolder;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinByte.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinByte.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinByte.java
new file mode 100644
index 0000000..1533679
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinByte.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressNonDecimalMaxMinByte
+    implements ValueCompressonHolder.UnCompressValue<byte[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNonDecimalMaxMinByte.class.getName());
+  /**
+   * byteCompressor.
+   */
+  private static Compressor<byte[]> byteCompressor =
+      SnappyCompression.SnappyByteCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private byte[] value;
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException cloneNotSupportedException) {
+      LOGGER.error(cloneNotSupportedException,
+          cloneNotSupportedException.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressNonDecimalMaxMinByte byte1 = new UnCompressNonDecimalMaxMinByte();
+    byte1.setValue(byteCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
+    ValueCompressonHolder.UnCompressValue byte1 =
+        ValueCompressionUtil.unCompressNonDecimalMaxMin(dataType, dataType);
+    ValueCompressonHolder.unCompress(dataType, byte1, value);
+    return byte1;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return value;
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressNonDecimalMaxMinByte();
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    this.value = value;
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimalVal, Object maxValueObject) {
+    double maxValue = (double) maxValueObject;
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      // stored byte holds the scaled delta (max - actual); decode it below
+
+      if (value[i] == 0) {
+        vals[i] = maxValue;
+      } else {
+        vals[i] = (maxValue - value[i]) / Math.pow(10, decimalVal);
+      }
+
+    }
+    dataHolder.setReadableDoubleValues(vals);
+    return dataHolder;
+  }
+
+  @Override public void setValue(byte[] value) {
+    this.value = value;
+  }
+
+}
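
In the max-min variants the store keeps the delta (max - actual) instead of
the value itself, which usually fits a smaller type; getValues() above
reverses it. A worked example with illustrative numbers:

    double maxValue = 99999d; // block maximum, kept in metadata
    byte delta = 25;          // stored byte: scaled (max - actual)
    int decimalVal = 2;
    double actual = (delta == 0)
        ? maxValue                                        // zero delta: value equals the max
        : (maxValue - delta) / Math.pow(10, decimalVal);  // (99999 - 25) / 100 = 999.74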

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinDefault.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinDefault.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinDefault.java
new file mode 100644
index 0000000..ccb765a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinDefault.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressNonDecimalMaxMinDefault implements UnCompressValue<double[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNonDecimalMaxMinDefault.class.getName());
+  /**
+   * doubleCompressor.
+   */
+  private static Compressor<double[]> doubleCompressor =
+      SnappyCompression.SnappyDoubleCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private double[] value;
+
+  @Override public void setValue(double[] value) {
+    this.value = value;
+  }
+
+  @Override public UnCompressValue getNew() {
+    try {
+      return (UnCompressValue) clone();
+    } catch (CloneNotSupportedException exce) {
+      LOGGER.error(exce, exce.getMessage());
+    }
+    return null;
+  }
+
+  @Override public UnCompressValue compress() {
+    UnCompressNonDecimalMaxMinByte byte1 = new UnCompressNonDecimalMaxMinByte();
+    byte1.setValue(doubleCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToDoubleArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public UnCompressValue getCompressorObject() {
+    return new UnCompressNonDecimalMaxMinByte();
+  }
+
+  @Override public UnCompressValue uncompress(DataType dataType) {
+    return null;
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double maxVal = (double) maxValueObject;
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder holder = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      // stored double holds the scaled delta (max - actual); decode it below
+
+      if (value[i] == 0) {
+        vals[i] = maxVal;
+      } else {
+        vals[i] = (maxVal - value[i]) / Math.pow(10, decimal);
+      }
+
+    }
+    holder.setReadableDoubleValues(vals);
+    return holder;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinFloat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinFloat.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinFloat.java
new file mode 100644
index 0000000..16bf101
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinFloat.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public class UnCompressNonDecimalMaxMinFloat
+    implements ValueCompressonHolder.UnCompressValue<float[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNonDecimalMaxMinFloat.class.getName());
+  /**
+   * floatCompressor
+   */
+  private static Compressor<float[]> floatCompressor =
+      SnappyCompression.SnappyFloatCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private float[] value;
+
+  @Override public void setValue(float[] value) {
+    this.value = value;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException exc1) {
+      LOGGER.error(exc1, exc1.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressNonDecimalMaxMinByte byte1 = new UnCompressNonDecimalMaxMinByte();
+    byte1.setValue(floatCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override
+  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
+    return null;
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToFloatArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressNonDecimalMaxMinByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double maxValue = (double) maxValueObject;
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder holder = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      // a stored 0 marks the max value itself; any other entry is the
+      // delta from max, scaled back by the decimal factor
+      if (value[i] == 0) {
+        vals[i] = maxValue;
+      } else {
+        vals[i] = (maxValue - value[i]) / Math.pow(10, decimal);
+      }
+    }
+    holder.setReadableDoubleValues(vals);
+    return holder;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinInt.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinInt.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinInt.java
new file mode 100644
index 0000000..c0ab9c0
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinInt.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressNonDecimalMaxMinInt implements UnCompressValue<int[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNonDecimalMaxMinInt.class.getName());
+  /**
+   * intCompressor.
+   */
+  private static Compressor<int[]> intCompressor = SnappyCompression.SnappyIntCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private int[] value;
+
+  @Override public void setValue(int[] value) {
+    this.value = value;
+  }
+
+  @Override public UnCompressValue getNew() {
+    try {
+      return (UnCompressValue) clone();
+    } catch (CloneNotSupportedException ex1) {
+      LOGGER.error(ex1, ex1.getMessage());
+    }
+    return null;
+  }
+
+  @Override public UnCompressValue compress() {
+    UnCompressNonDecimalMaxMinByte byte1 = new UnCompressNonDecimalMaxMinByte();
+    byte1.setValue(intCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToIntArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public UnCompressValue getCompressorObject() {
+    return new UnCompressNonDecimalMaxMinByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double maxValue = (double) maxValueObject;
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder dataHolderInfo = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      // a stored 0 marks the max value itself; any other entry is the
+      // delta from max, scaled back by the decimal factor
+      if (value[i] == 0) {
+        vals[i] = maxValue;
+      } else {
+        vals[i] = (maxValue - value[i]) / Math.pow(10, decimal);
+      }
+    }
+    dataHolderInfo.setReadableDoubleValues(vals);
+    return dataHolderInfo;
+  }
+
+  @Override public UnCompressValue uncompress(DataType dataType) {
+    return null;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinLong.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinLong.java
new file mode 100644
index 0000000..d8ed560
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinLong.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public class UnCompressNonDecimalMaxMinLong
+    implements ValueCompressonHolder.UnCompressValue<long[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNonDecimalMaxMinLong.class.getName());
+
+  /**
+   * longCompressor.
+   */
+  private static Compressor<long[]> longCompressor =
+      SnappyCompression.SnappyLongCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private long[] value;
+
+  @Override public void setValue(long[] value) {
+    this.value = value;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException exc) {
+      LOGGER.error(exc, exc.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressNonDecimalMaxMinByte uNonDecByte = new UnCompressNonDecimalMaxMinByte();
+    uNonDecByte.setValue(longCompressor.compress(value));
+    return uNonDecByte;
+  }
+
+  @Override
+  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
+    return null;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buff = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToLongArray(buff, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressNonDecimalMaxMinByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double maxValue = (double) maxValueObject;
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder carbonDataHolder = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      // a stored 0 marks the max value itself; any other entry is the
+      // delta from max, scaled back by the decimal factor
+      if (value[i] == 0) {
+        vals[i] = maxValue;
+      } else {
+        vals[i] = (maxValue - value[i]) / Math.pow(10, decimal);
+      }
+    }
+    carbonDataHolder.setReadableDoubleValues(vals);
+    return carbonDataHolder;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinShort.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinShort.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinShort.java
new file mode 100644
index 0000000..f110560
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalMaxMinShort.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public class UnCompressNonDecimalMaxMinShort
+    implements ValueCompressonHolder.UnCompressValue<short[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNonDecimalMaxMinShort.class.getName());
+  /**
+   * shortCompressor.
+   */
+  private static Compressor<short[]> shortCompressor =
+      SnappyCompression.SnappyShortCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private short[] value;
+
+  @Override public void setValue(short[] value) {
+    this.value = value;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException exception5) {
+      LOGGER.error(exception5, exception5.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressNonDecimalMaxMinByte byte1 = new UnCompressNonDecimalMaxMinByte();
+    byte1.setValue(shortCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue uncompress(
+      ValueCompressionUtil.DataType dataTypeVal) {
+    return null;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToShortArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressNonDecimalMaxMinByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double maxValue = (double) maxValueObject;
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      // a stored 0 marks the max value itself; any other entry is the
+      // delta from max, scaled back by the decimal factor
+      if (value[i] == 0) {
+        vals[i] = maxValue;
+      } else {
+        vals[i] = (maxValue - value[i]) / Math.pow(10, decimal);
+      }
+    }
+    dataHolder.setReadableDoubleValues(vals);
+    return dataHolder;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalShort.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalShort.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalShort.java
new file mode 100644
index 0000000..852fd12
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalShort.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public class UnCompressNonDecimalShort implements ValueCompressonHolder.UnCompressValue<short[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNonDecimalShort.class.getName());
+  /**
+   * shortCompressor.
+   */
+  private static Compressor<short[]> shortCompressor =
+      SnappyCompression.SnappyShortCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private short[] value;
+
+  @Override public void setValue(short[] value) {
+    this.value = value;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException exception1) {
+      LOGGER.error(exception1, exception1.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressNonDecimalByte byte1 = new UnCompressNonDecimalByte();
+    byte1.setValue(shortCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override
+  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
+    return null;
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToShortArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressNonDecimalByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    double[] vals = new double[value.length];
+    for (int i = 0; i < vals.length; i++) {
+      vals[i] = value[i] / Math.pow(10, decimal);
+    }
+    dataHolder.setReadableDoubleValues(vals);
+    return dataHolder;
+  }
+
+}
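
Unlike the max-min variants, this reader only undoes the decimal scaling:
stored integral values are fixed-point numbers with 'decimal' digits after the
point. A minimal standalone sketch of that decode (class name hypothetical):

public final class NonDecimalDecodeSketch {
  static double[] decode(short[] stored, int decimal) {
    double factor = Math.pow(10, decimal);
    double[] out = new double[stored.length];
    for (int i = 0; i < stored.length; i++) {
      out[i] = stored[i] / factor;     // e.g. 1234 with decimal = 2 -> 12.34
    }
    return out;
  }

  public static void main(String[] args) {
    double[] vals = decode(new short[] {1234, -5}, 2);
    System.out.println(vals[0] + ", " + vals[1]); // 12.34, -0.05
  }
}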

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneByte.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneByte.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneByte.java
new file mode 100644
index 0000000..d689f93
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneByte.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressNoneByte implements UnCompressValue<byte[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNoneByte.class.getName());
+
+  /**
+   * byteCompressor.
+   */
+  private static Compressor<byte[]> byteCompressor =
+      SnappyCompression.SnappyByteCompression.INSTANCE;
+
+  /**
+   * value.
+   */
+  private byte[] value;
+
+  @Override public UnCompressValue getNew() {
+    try {
+      return (UnCompressValue) clone();
+    } catch (CloneNotSupportedException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return null;
+  }
+
+  @Override public void setValue(byte[] value) {
+    this.value = value;
+  }
+
+  @Override public UnCompressValue uncompress(DataType dataType) {
+    UnCompressValue byte1 = ValueCompressionUtil.unCompressNone(dataType, dataType);
+    ValueCompressonHolder.unCompress(dataType, byte1, value);
+    return byte1;
+  }
+
+  @Override public UnCompressValue compress() {
+    UnCompressNoneByte byte1 = new UnCompressNoneByte();
+    byte1.setValue(byteCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return value;
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    this.value = value;
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public UnCompressValue getCompressorObject() {
+    return new UnCompressNoneByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    CarbonReadDataHolder dataHldr = new CarbonReadDataHolder();
+    double[] vals = new double[value.length];
+    for (int i = 0; i < vals.length; i++) {
+      vals[i] = value[i];
+    }
+    dataHldr.setReadableDoubleValues(vals);
+    return dataHldr;
+  }
+
+}
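
On the write side these "none" holders pair a typed buffer with a byte holder:
compress() snappy-compresses the array and wraps the result in an
UnCompressNoneByte, and getBackArrayData() then exposes those bytes. A
hypothetical usage sketch against the classes in this commit (class name
NoneWriteSideSketch is made up for illustration):

import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNoneByte;

public class NoneWriteSideSketch {
  public static void main(String[] args) {
    UnCompressNoneByte holder = new UnCompressNoneByte();
    holder.setValue(new byte[] {1, 2, 3, 4});

    // compress() wraps the snappy output in a fresh UnCompressNoneByte
    UnCompressValue compressed = holder.compress();
    byte[] blockData = compressed.getBackArrayData();
    System.out.println(blockData.length + " bytes after compression");
  }
}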

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneDefault.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneDefault.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneDefault.java
new file mode 100644
index 0000000..c1395b4
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneDefault.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressNoneDefault implements UnCompressValue<double[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNoneDefault.class.getName());
+  /**
+   * doubleCompressor.
+   */
+  private static Compressor<double[]> doubleCompressor =
+      SnappyCompression.SnappyDoubleCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private double[] value;
+
+  @Override public void setValue(double[] value) {
+    this.value = value;
+  }
+
+  @Override public UnCompressValue getNew() {
+    try {
+      return (UnCompressValue) clone();
+    } catch (CloneNotSupportedException exception1) {
+      LOGGER.error(exception1, exception1.getMessage());
+    }
+    return null;
+  }
+
+  @Override public UnCompressValue compress() {
+    UnCompressNoneByte byte1 = new UnCompressNoneByte();
+    byte1.setValue(doubleCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public UnCompressValue uncompress(DataType dataType) {
+    return null;
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public UnCompressValue getCompressorObject() {
+    return new UnCompressNoneByte();
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToDoubleArray(buffer, value.length);
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    dataHolder.setReadableDoubleValues(value);
+    return dataHolder;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneFloat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneFloat.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneFloat.java
new file mode 100644
index 0000000..103b1c9
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneFloat.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public class UnCompressNoneFloat implements ValueCompressonHolder.UnCompressValue<float[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNoneFloat.class.getName());
+  /**
+   * floatCompressor
+   */
+  private static Compressor<float[]> floatCompressor =
+      SnappyCompression.SnappyFloatCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private float[] value;
+
+  @Override public void setValue(float[] value) {
+    this.value = value;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException ex5) {
+      LOGGER.error(ex5, ex5.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressNoneByte byte1 = new UnCompressNoneByte();
+    byte1.setValue(floatCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToFloatArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressNoneByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      vals[i] = value[i];
+    }
+    dataHolder.setReadableDoubleValues(vals);
+    return dataHolder;
+  }
+
+  @Override
+  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
+    return null;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneInt.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneInt.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneInt.java
new file mode 100644
index 0000000..ca5593d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneInt.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressNoneInt implements ValueCompressonHolder.UnCompressValue<int[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNoneInt.class.getName());
+  /**
+   * intCompressor.
+   */
+  private static Compressor<int[]> intCompressor = SnappyCompression.SnappyIntCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private int[] value;
+
+  @Override public void setValue(int[] value) {
+    this.value = value;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException exc) {
+      LOGGER.error(exc, exc.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressNoneByte byte1 = new UnCompressNoneByte();
+    byte1.setValue(intCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
+    return null;
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToIntArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressNoneByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    CarbonReadDataHolder dataHolderInfoObj = new CarbonReadDataHolder();
+    double[] vals = new double[value.length];
+    for (int i = 0; i < vals.length; i++) {
+      vals[i] = value[i];
+    }
+
+    dataHolderInfoObj.setReadableDoubleValues(vals);
+    return dataHolderInfoObj;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneLong.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneLong.java
new file mode 100644
index 0000000..47220c5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneLong.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public class UnCompressNoneLong implements ValueCompressonHolder.UnCompressValue<long[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNoneLong.class.getName());
+  /**
+   * longCompressor.
+   */
+  private static Compressor<long[]> longCompressor =
+      SnappyCompression.SnappyLongCompression.INSTANCE;
+  /**
+   * value.
+   */
+  protected long[] value;
+
+  @Override public void setValue(long[] value) {
+    this.value = value;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException clnNotSupportedExc) {
+      LOGGER.error(clnNotSupportedExc,
+          clnNotSupportedExc.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressNoneByte byte1 = new UnCompressNoneByte();
+    byte1.setValue(longCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override
+  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dType) {
+    return null;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public void setValueInBytes(byte[] byteValue) {
+    ByteBuffer buffer = ByteBuffer.wrap(byteValue);
+    this.value = ValueCompressionUtil.convertToLongArray(buffer, byteValue.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressNoneByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    double[] vals = new double[value.length];
+    for (int i = 0; i < vals.length; i++) {
+      vals[i] = value[i];
+    }
+    dataHolder.setReadableDoubleValues(vals);
+    return dataHolder;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneShort.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneShort.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneShort.java
new file mode 100644
index 0000000..ed41ca5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNoneShort.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressNoneShort implements ValueCompressonHolder.UnCompressValue<short[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNoneShort.class.getName());
+
+  /**
+   * shortCompressor.
+   */
+  private static Compressor<short[]> shortCompressor =
+      SnappyCompression.SnappyShortCompression.INSTANCE;
+
+  /**
+   * shortValue.
+   */
+  private short[] shortValue;
+
+  @Override public void setValue(short[] shortValue) {
+    this.shortValue = shortValue;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException cns1) {
+      LOGGER.error(cns1, cns1.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressNoneByte byte1 = new UnCompressNoneByte();
+    byte1.setValue(shortCompressor.compress(shortValue));
+    return byte1;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
+    return null;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(shortValue);
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    shortValue = ValueCompressionUtil.convertToShortArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressNoneByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    double[] vals = new double[shortValue.length];
+    for (int i = 0; i < vals.length; i++) {
+      vals[i] = shortValue[i];
+    }
+    dataHolder.setReadableDoubleValues(vals);
+    return dataHolder;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/dataholder/CarbonReadDataHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/dataholder/CarbonReadDataHolder.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/dataholder/CarbonReadDataHolder.java
new file mode 100644
index 0000000..79507ce
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/dataholder/CarbonReadDataHolder.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.dataholder;
+
+import java.math.BigDecimal;
+
+public class CarbonReadDataHolder {
+
+  /**
+   * doubleValues
+   */
+  private double[] doubleValues;
+
+  /**
+   * longValues
+   */
+  private long[] longValues;
+
+  /**
+   * bigDecimalValues
+   */
+  private BigDecimal[] bigDecimalValues;
+
+  /**
+   * byteValues
+   */
+  private byte[][] byteValues;
+
+  /**
+   * @return the doubleValues
+   */
+  public double[] getReadableDoubleValues() {
+    return doubleValues;
+  }
+
+  /**
+   * @param doubleValues the doubleValues to set
+   */
+  public void setReadableDoubleValues(double[] doubleValues) {
+    this.doubleValues = doubleValues;
+  }
+
+  /**
+   * @return the byteValues
+   */
+  public byte[][] getReadableByteArrayValues() {
+    return byteValues;
+  }
+
+  /**
+   * @param longValues the longValues to set
+   */
+  public void setReadableLongValues(long[] longValues) {
+    this.longValues = longValues;
+  }
+
+  /**
+   * @param bigDecimalValues the bigDecimalValues to set
+   */
+  public void setReadableBigDecimalValues(BigDecimal[] bigDecimalValues) {
+    this.bigDecimalValues = bigDecimalValues;
+  }
+
+  /**
+   * @param byteValues the byteValues to set
+   */
+  public void setReadableByteValues(byte[][] byteValues) {
+    this.byteValues = byteValues;
+  }
+
+  /**
+   * below method will be used to get the double value by index
+   *
+   * @param index
+   * @return double value at the given index
+   */
+  public double getReadableDoubleValueByIndex(int index) {
+    return this.doubleValues[index];
+  }
+
+  public long getReadableLongValueByIndex(int index) {
+    return this.longValues[index];
+  }
+
+  public BigDecimal getReadableBigDecimalValueByIndex(int index) {
+    return this.bigDecimalValues[index];
+  }
+
+  /**
+   * below method will be used to get the readable byte array value by index
+   *
+   * @param index
+   * @return byte array value
+   */
+  public byte[] getReadableByteArrayValueByIndex(int index) {
+    return this.byteValues[index];
+  }
+}
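
A hypothetical usage sketch (class name ReadHolderSketch is made up): the
decompressors above fill exactly one typed array via a setReadable* call, and
the query side then pulls individual values back out by row index.

import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;

public class ReadHolderSketch {
  public static void main(String[] args) {
    CarbonReadDataHolder holder = new CarbonReadDataHolder();
    holder.setReadableDoubleValues(new double[] {12.34, 11.1});

    // only the array that was set may be read; the others stay null
    System.out.println(holder.getReadableDoubleValueByIndex(1)); // 11.1
  }
}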

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/dataholder/CarbonWriteDataHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/dataholder/CarbonWriteDataHolder.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/dataholder/CarbonWriteDataHolder.java
new file mode 100644
index 0000000..951857e
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/dataholder/CarbonWriteDataHolder.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.dataholder;
+
+public class CarbonWriteDataHolder {
+  /**
+   * doubleValues
+   */
+  private double[] doubleValues;
+
+  /**
+   * longValues
+   */
+  private long[] longValues;
+
+  /**
+   * byteValues
+   */
+  private byte[][] byteValues;
+
+  /**
+   * columnByteValues
+   */
+  private byte[][][] columnByteValues;
+
+  /**
+   * size
+   */
+  private int size;
+
+  /**
+   * totalSize
+   */
+  private int totalSize;
+
+  /**
+   * Method to initialise double array
+   *
+   * @param size
+   */
+  public void initialiseDoubleValues(int size) {
+    if (size < 1) {
+      throw new IllegalArgumentException("Invalid array size");
+    }
+    doubleValues = new double[size];
+  }
+
+  public void reset() {
+    size = 0;
+    totalSize = 0;
+  }
+
+  /**
+   * Method to initialise byte array values
+   *
+   * @param size
+   */
+  public void initialiseByteArrayValues(int size) {
+    if (size < 1) {
+      throw new IllegalArgumentException("Invalid array size");
+    }
+
+    byteValues = new byte[size][];
+    columnByteValues = new byte[size][][];
+  }
+
+  /**
+   * Method to initialise long array
+   *
+   * @param size
+   */
+  public void initialiseLongValues(int size) {
+    if (size < 1) {
+      throw new IllegalArgumentException("Invalid array size");
+    }
+    longValues = new long[size];
+  }
+
+  /**
+   * set double value by index
+   *
+   * @param index
+   * @param value
+   */
+  public void setWritableDoubleValueByIndex(int index, Object value) {
+    doubleValues[index] = (Double) value;
+    size++;
+  }
+
+  /**
+   * set long value by index
+   *
+   * @param index
+   * @param value
+   */
+  public void setWritableLongValueByIndex(int index, Object value) {
+    longValues[index] = (Long) value;
+    size++;
+  }
+
+  /**
+   * set byte array value by index
+   *
+   * @param index
+   * @param value
+   */
+  public void setWritableByteArrayValueByIndex(int index, byte[] value) {
+    byteValues[index] = value;
+    size++;
+    if (null != value) totalSize += value.length;
+  }
+
+  /**
+   * set column byte array values by index (entries after mdKeyIndex in columnData)
+   */
+  public void setWritableByteArrayValueByIndex(int index, int mdKeyIndex, Object[] columnData) {
+    int l = 0;
+    columnByteValues[index] = new byte[columnData.length - (mdKeyIndex + 1)][];
+    for (int i = mdKeyIndex + 1; i < columnData.length; i++) {
+      columnByteValues[index][l++] = (byte[]) columnData[i];
+    }
+  }
+
+  /**
+   * Get Writable Double Values
+   */
+  public double[] getWritableDoubleValues() {
+    if (size < doubleValues.length) {
+      double[] temp = new double[size];
+      System.arraycopy(doubleValues, 0, temp, 0, size);
+      doubleValues = temp;
+    }
+    return doubleValues;
+  }
+
+  /**
+   * Get writable byte array values
+   */
+  public byte[] getWritableByteArrayValues() {
+    byte[] temp = new byte[totalSize];
+    int startIndexToCopy = 0;
+    for (int i = 0; i < size; i++) {
+      System.arraycopy(byteValues[i], 0, temp, startIndexToCopy, byteValues[i].length);
+      startIndexToCopy += byteValues[i].length;
+    }
+    return temp;
+  }
+
+  public byte[][] getByteArrayValues() {
+    if (size < byteValues.length) {
+      byte[][] temp = new byte[size][];
+      System.arraycopy(byteValues, 0, temp, 0, size);
+      byteValues = temp;
+    }
+    return byteValues;
+  }
+
+  /**
+   * Get Writable Long Values
+   *
+   * @return the long values, trimmed to the number of entries actually set
+   */
+  public long[] getWritableLongValues() {
+    if (size < longValues.length) {
+      long[] temp = new long[size];
+      System.arraycopy(longValues, 0, temp, 0, size);
+      longValues = temp;
+    }
+    return longValues;
+  }
+}
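
The write holder is initialise-to-capacity, append-by-index: size counts the
set* calls, and the getWritable* accessors trim the backing array down to that
size. A hypothetical usage sketch (class name WriteHolderSketch is made up):

import org.apache.carbondata.core.datastorage.store.dataholder.CarbonWriteDataHolder;

public class WriteHolderSketch {
  public static void main(String[] args) {
    CarbonWriteDataHolder holder = new CarbonWriteDataHolder();
    holder.initialiseDoubleValues(1000);        // capacity, not final size

    holder.setWritableDoubleValueByIndex(0, 12.34);
    holder.setWritableDoubleValueByIndex(1, 11.1);

    // two slots were filled, so the returned array has length 2
    double[] page = holder.getWritableDoubleValues();
    System.out.println(page.length); // 2
  }
}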


[48/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ColumnReverseDictionaryInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ColumnReverseDictionaryInfo.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ColumnReverseDictionaryInfo.java
new file mode 100644
index 0000000..03a6d6a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ColumnReverseDictionaryInfo.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.util.CarbonProperties;
+
+import net.jpountz.xxhash.XXHash32;
+import net.jpountz.xxhash.XXHashFactory;
+
+/**
+ * class that implements methods specific for reverse dictionary data look up,
+ * i.e. finding the surrogate key for a given dictionary value
+ */
+public class ColumnReverseDictionaryInfo extends AbstractColumnDictionaryInfo {
+
+  /**
+   * Map which will maintain mapping of byte array to surrogate key
+   */
+  private Map<DictionaryByteArrayWrapper, Integer> dictionaryByteArrayToSurrogateKeyMap;
+
+  /**
+   * hashing algorithm to calculate hash code
+   */
+  private XXHash32 xxHash32;
+
+  /**
+   * check and initialize xxHash32 if enabled
+   */
+  public ColumnReverseDictionaryInfo() {
+    boolean useXXHash = Boolean.valueOf(CarbonProperties.getInstance()
+        .getProperty(CarbonCommonConstants.ENABLE_XXHASH,
+            CarbonCommonConstants.ENABLE_XXHASH_DEFAULT));
+    if (useXXHash) {
+      xxHash32 = XXHashFactory.fastestInstance().hash32();
+    }
+  }
+
+  /**
+   * This method will find and return the surrogate key for a given dictionary value.
+   * Applicable scenarios:
+   * 1. Incremental data load: the dictionary is not regenerated for existing values,
+   * so incoming values have to be looked up in the existing dictionary cache.
+   * 2. Filter queries where the surrogate key has to be found for a given value.
+   *
+   * @param value dictionary value as byte array. It will be treated as key here
+   * @return the surrogate key if found, else CarbonCommonConstants.INVALID_SURROGATE_KEY
+   */
+  @Override public int getSurrogateKey(byte[] value) {
+    DictionaryByteArrayWrapper dictionaryByteArrayWrapper =
+        new DictionaryByteArrayWrapper(value, xxHash32);
+    Integer surrogateKeyInMap =
+        dictionaryByteArrayToSurrogateKeyMap.get(dictionaryByteArrayWrapper);
+    if (null == surrogateKeyInMap) {
+      return CarbonCommonConstants.INVALID_SURROGATE_KEY;
+    }
+    return surrogateKeyInMap;
+  }
+
+  /**
+   * This method will add a new dictionary chunk to existing list of dictionary chunks
+   *
+   * @param dictionaryChunk
+   */
+  @Override public void addDictionaryChunk(List<byte[]> dictionaryChunk) {
+    dictionaryChunks.add(dictionaryChunk);
+    if (null == dictionaryByteArrayToSurrogateKeyMap) {
+      createDictionaryByteArrayToSurrogateKeyMap(dictionaryChunk.size());
+    }
+    addDataToDictionaryMap();
+  }
+
+  /**
+   * This method will add the new dictionary data to map
+   */
+  private void addDataToDictionaryMap() {
+    int surrogateKey = dictionaryByteArrayToSurrogateKeyMap.size();
+    List<byte[]> oneDictionaryChunk = dictionaryChunks.get(dictionaryChunks.size() - 1);
+    for (int i = 0; i < oneDictionaryChunk.size(); i++) {
+      // create a wrapper class that will calculate hash code for byte array
+      DictionaryByteArrayWrapper dictionaryByteArrayWrapper =
+          new DictionaryByteArrayWrapper(oneDictionaryChunk.get(i), xxHash32);
+      dictionaryByteArrayToSurrogateKeyMap.put(dictionaryByteArrayWrapper, ++surrogateKey);
+    }
+  }
+
+  /**
+   * This method will create the dictionary map. On first use the map is created
+   * with an initial capacity equal to the size of the incoming dictionary chunk.
+   *
+   * @param initialMapSize capacity with which the map is to be instantiated
+   */
+  private void createDictionaryByteArrayToSurrogateKeyMap(int initialMapSize) {
+    dictionaryByteArrayToSurrogateKeyMap = new ConcurrentHashMap<>(initialMapSize);
+  }
+}
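
For illustration, a minimal sketch (not part of this commit) of the reverse look up once a chunk has been added. It assumes the inherited dictionaryChunks list is initialized by AbstractColumnDictionaryInfo, that surrogate keys are assigned sequentially starting at 1 as addDataToDictionaryMap does above, and that java.util and java.nio.charset.StandardCharsets imports are in place:

    static void reverseLookupSketch() {
      ColumnReverseDictionaryInfo info = new ColumnReverseDictionaryInfo();
      List<byte[]> chunk = new ArrayList<>();
      chunk.add("delhi".getBytes(StandardCharsets.UTF_8));
      chunk.add("mumbai".getBytes(StandardCharsets.UTF_8));
      info.addDictionaryChunk(chunk);   // surrogate keys 1 and 2 are assigned
      // the second value resolves to surrogate key 2
      int key = info.getSurrogateKey("mumbai".getBytes(StandardCharsets.UTF_8));
      // unknown values resolve to CarbonCommonConstants.INVALID_SURROGATE_KEY
      int missing = info.getSurrogateKey("pune".getBytes(StandardCharsets.UTF_8));
    }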

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/Dictionary.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/Dictionary.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/Dictionary.java
new file mode 100644
index 0000000..d29be86
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/Dictionary.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+/**
+ * dictionary interface which declares methods for finding surrogate key for a
+ * given dictionary value and finding dictionary value from a given surrogate key
+ */
+public interface Dictionary {
+
+  /**
+   * This method will find and return the surrogate key for a given dictionary value.
+   * Applicable scenarios:
+   * 1. Incremental data load: the dictionary is not regenerated for existing values,
+   * so incoming values have to be looked up in the existing dictionary cache.
+   * 2. Filter queries where the surrogate key has to be found for a given value.
+   *
+   * @param value dictionary value
+   * @return the surrogate key if found, else 0
+   */
+  int getSurrogateKey(String value);
+
+  /**
+   * This method will find and return the surrogate key for a given dictionary value.
+   * Applicable scenarios:
+   * 1. Incremental data load: the dictionary is not regenerated for existing values,
+   * so incoming values have to be looked up in the existing dictionary cache.
+   * 2. Filter queries where the surrogate key has to be found for a given value.
+   *
+   * @param value dictionary value as byte array
+   * @return the surrogate key if found, else -1
+   */
+  int getSurrogateKey(byte[] value);
+
+  /**
+   * This method will find and return the dictionary value for a given surrogate key.
+   * Applicable scenario:
+   * 1. Query final result preparation: this method is used to translate
+   * surrogate keys in the result back to the original dictionary values
+   *
+   * @param surrogateKey a unique ID for a dictionary value
+   * @return value if found else null
+   */
+  String getDictionaryValueForKey(int surrogateKey);
+
+  /**
+   * This method will find and return the sort index for a given dictionary id.
+   * Applicable scenario:
+   * 1. Order by queries where data sorting is required
+   *
+   * @param surrogateKey a unique ID for a dictionary value
+   * @return the sort index if found, else 0
+   */
+  int getSortedIndex(int surrogateKey);
+
+  /**
+   * This method will find and return the dictionary value for a given sort index.
+   * Applicable scenario:
+   * 1. Query final result preparation for order by queries: this method is used to
+   * translate surrogate keys in the result back to the original dictionary values
+   *
+   * @param sortedIndex sort index of dictionary value
+   * @return value if found else null
+   */
+  String getDictionaryValueFromSortedIndex(int sortedIndex);
+
+  /**
+   * This method returns the dictionary chunks wrapper of a column.
+   * The wrapper wraps the List<List<byte[]>> and provides an iterator to retrieve
+   * the chunk members.
+   * Application scenario:
+   * preparing the column sort info while writing the sort index file.
+   *
+   * @return dictionary chunks wrapper
+   */
+  DictionaryChunksWrapper getDictionaryChunks();
+
+  /**
+   * This method will release the objects and set default value for primitive types
+   */
+  void clear();
+}
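
Because both the forward and reverse implementations sit behind this interface, a caller can decode results without knowing which cache produced the dictionary. A hypothetical helper, shown only to illustrate the contract:

    // decodes a column of surrogate keys back to values via any Dictionary impl
    static String[] decodeColumn(Dictionary dictionary, int[] surrogateKeys) {
      String[] values = new String[surrogateKeys.length];
      for (int i = 0; i < surrogateKeys.length; i++) {
        // getDictionaryValueForKey returns null for an unknown surrogate
        values[i] = dictionary.getDictionaryValueForKey(surrogateKeys[i]);
      }
      return values;
    }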

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryByteArrayWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryByteArrayWrapper.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryByteArrayWrapper.java
new file mode 100644
index 0000000..73f7702
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryByteArrayWrapper.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+import java.util.Arrays;
+
+import org.apache.carbondata.core.util.ByteUtil;
+
+import net.jpountz.xxhash.XXHash32;
+
+/**
+ * class that holds a byte array and overrides the equals and hashCode methods,
+ * so that instances can be used as map keys for byte-array comparison
+ */
+public class DictionaryByteArrayWrapper {
+
+  /**
+   * dictionary value as byte array
+   */
+  private byte[] data;
+
+  /**
+   * hashing algorithm to calculate hash code
+   */
+  private XXHash32 xxHash32;
+
+  /**
+   * @param data
+   */
+  public DictionaryByteArrayWrapper(byte[] data) {
+    this.data = data;
+  }
+
+  /**
+   * @param data
+   * @param xxHash32
+   */
+  public DictionaryByteArrayWrapper(byte[] data, XXHash32 xxHash32) {
+    this(data);
+    this.xxHash32 = xxHash32;
+  }
+
+  /**
+   * This method will compare two DictionaryByteArrayWrapper objects
+   *
+   * @param other object to compare with
+   * @return true if the wrapped byte arrays have equal content
+   */
+  @Override public boolean equals(Object other) {
+    if (this == other) {
+      return true;
+    }
+    if (other == null || getClass() != other.getClass()) {
+      return false;
+    }
+    DictionaryByteArrayWrapper otherObjectToCompare = (DictionaryByteArrayWrapper) other;
+    if (data.length != otherObjectToCompare.data.length) {
+      return false;
+    }
+    return ByteUtil.UnsafeComparer.INSTANCE.equals(data, otherObjectToCompare.data);
+
+  }
+
+  /**
+   * This method will calculate the hash code for the wrapped data,
+   * using xxHash32 when it is enabled
+   *
+   * @return hash code of the wrapped byte array
+   */
+  @Override public int hashCode() {
+    if (null != xxHash32) {
+      return xxHash32.hash(data, 0, data.length, 0);
+    }
+    return 31 * Arrays.hashCode(data);
+  }
+}
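
The point of the wrapper is that two distinct byte[] instances with equal content compare as equal and hash to the same bucket, so both reach the same map entry. A small sketch under that assumption (java.util and java.nio.charset.StandardCharsets imports assumed):

    static Integer lookupSketch() {
      XXHash32 hasher = XXHashFactory.fastestInstance().hash32();
      Map<DictionaryByteArrayWrapper, Integer> map = new HashMap<>();
      byte[] first = "carbon".getBytes(StandardCharsets.UTF_8);
      byte[] second = "carbon".getBytes(StandardCharsets.UTF_8); // distinct instance, same content
      map.put(new DictionaryByteArrayWrapper(first, hasher), 1);
      // the second wrapper finds the entry stored under the first
      return map.get(new DictionaryByteArrayWrapper(second, hasher)); // 1
    }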

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryCacheLoader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryCacheLoader.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryCacheLoader.java
new file mode 100644
index 0000000..8da437b
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryCacheLoader.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+import java.io.IOException;
+
+import org.apache.carbondata.core.carbon.ColumnIdentifier;
+
+public interface DictionaryCacheLoader {
+
+  /**
+   * This method will load the dictionary data for a given columnIdentifier
+   *
+   * @param dictionaryInfo             dictionary info object which will hold the required data
+   *                                   for a given column
+   * @param columnIdentifier           column unique identifier
+   * @param dictionaryChunkStartOffset start offset from where dictionary file has to
+   *                                   be read
+   * @param dictionaryChunkEndOffset   end offset till where dictionary file has to
+   *                                   be read
+   * @param loadSortIndex              flag to indicate whether the sort index file has to be
+   *                                   read in memory after dictionary loading
+   * @throws IOException
+   */
+  void load(DictionaryInfo dictionaryInfo, ColumnIdentifier columnIdentifier,
+      long dictionaryChunkStartOffset, long dictionaryChunkEndOffset, boolean loadSortIndex)
+      throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryCacheLoaderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryCacheLoaderImpl.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryCacheLoaderImpl.java
new file mode 100644
index 0000000..6e603f9
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryCacheLoaderImpl.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.carbondata.common.factory.CarbonCommonFactory;
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.carbon.ColumnIdentifier;
+import org.apache.carbondata.core.reader.CarbonDictionaryReader;
+import org.apache.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReader;
+import org.apache.carbondata.core.service.DictionaryService;
+
+/**
+ * This class is responsible for loading the dictionary data for given columns
+ */
+public class DictionaryCacheLoaderImpl implements DictionaryCacheLoader {
+
+  /**
+   * carbon table identifier
+   */
+  private CarbonTableIdentifier carbonTableIdentifier;
+
+  /**
+   * carbon store path
+   */
+  private String carbonStorePath;
+
+  /**
+   * @param carbonTableIdentifier fully qualified table name
+   * @param carbonStorePath       hdfs store path
+   */
+  public DictionaryCacheLoaderImpl(CarbonTableIdentifier carbonTableIdentifier,
+      String carbonStorePath) {
+    this.carbonTableIdentifier = carbonTableIdentifier;
+    this.carbonStorePath = carbonStorePath;
+  }
+
+  /**
+   * This method will load the dictionary data for a given columnIdentifier
+   *
+   * @param dictionaryInfo             dictionary info object which will hold the required data
+   *                                   for a given column
+   * @param columnIdentifier           column unique identifier
+   * @param dictionaryChunkStartOffset start offset from where dictionary file has to
+   *                                   be read
+   * @param dictionaryChunkEndOffset   end offset till where dictionary file has to
+   *                                   be read
+   * @param loadSortIndex              flag to indicate whether the sort index file has to be
+   *                                   read in memory after dictionary loading
+   * @throws IOException
+   */
+  @Override public void load(DictionaryInfo dictionaryInfo, ColumnIdentifier columnIdentifier,
+      long dictionaryChunkStartOffset, long dictionaryChunkEndOffset, boolean loadSortIndex)
+      throws IOException {
+    List<byte[]> dictionaryChunk =
+        load(columnIdentifier, dictionaryChunkStartOffset, dictionaryChunkEndOffset);
+    if (loadSortIndex) {
+      readSortIndexFile(dictionaryInfo, columnIdentifier);
+    }
+    dictionaryInfo.addDictionaryChunk(dictionaryChunk);
+  }
+
+  /**
+   * This method will load the dictionary data between a given start and end offset
+   *
+   * @param columnIdentifier column unique identifier
+   * @param startOffset      start offset of dictionary file
+   * @param endOffset        end offset of dictionary file
+   * @return list of dictionary value
+   * @throws IOException
+   */
+  private List<byte[]> load(ColumnIdentifier columnIdentifier, long startOffset, long endOffset)
+      throws IOException {
+    CarbonDictionaryReader dictionaryReader = getDictionaryReader(columnIdentifier);
+    List<byte[]> dictionaryValue = null;
+    try {
+      dictionaryValue = dictionaryReader.read(startOffset, endOffset);
+    } finally {
+      dictionaryReader.close();
+    }
+    return dictionaryValue;
+  }
+
+  /**
+   * This method will read the sort index file and load into memory
+   *
+   * @param dictionaryInfo
+   * @param columnIdentifier
+   * @throws IOException
+   */
+  private void readSortIndexFile(DictionaryInfo dictionaryInfo, ColumnIdentifier columnIdentifier)
+      throws IOException {
+    CarbonDictionarySortIndexReader sortIndexReader = getSortIndexReader(columnIdentifier);
+    try {
+      dictionaryInfo.setSortOrderIndex(sortIndexReader.readSortIndex());
+      dictionaryInfo.setSortReverseOrderIndex(sortIndexReader.readInvertedSortIndex());
+    } finally {
+      sortIndexReader.close();
+    }
+  }
+
+  /**
+   * This method will create a dictionary reader instance to read the dictionary file
+   *
+   * @param columnIdentifier unique column identifier
+   * @return carbon dictionary reader instance
+   */
+  private CarbonDictionaryReader getDictionaryReader(ColumnIdentifier columnIdentifier) {
+    DictionaryService dictService = CarbonCommonFactory.getDictionaryService();
+    return dictService
+        .getDictionaryReader(carbonTableIdentifier, columnIdentifier, carbonStorePath);
+  }
+
+  /**
+   * @param columnIdentifier unique column identifier
+   * @return sort index reader instance
+   */
+  private CarbonDictionarySortIndexReader getSortIndexReader(ColumnIdentifier columnIdentifier) {
+    DictionaryService dictService = CarbonCommonFactory.getDictionaryService();
+    return dictService
+        .getDictionarySortIndexReader(carbonTableIdentifier, columnIdentifier, carbonStorePath);
+  }
+}
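
A hedged sketch of how a caller might drive this loader; the offsets here are illustrative placeholders, whereas in the codebase they come from the dictionary metadata file:

    static void loadSketch(DictionaryInfo dictionaryInfo, CarbonTableIdentifier table,
        ColumnIdentifier column, String storePath) throws IOException {
      DictionaryCacheLoader loader = new DictionaryCacheLoaderImpl(table, storePath);
      long chunkStartOffset = 0L;    // illustrative: read from the file start
      long chunkEndOffset = 1024L;   // illustrative: real value comes from dictionary metadata
      // true: also load the sort index, as the forward dictionary cache does
      loader.load(dictionaryInfo, column, chunkStartOffset, chunkEndOffset, true);
    }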

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryChunksWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryChunksWrapper.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryChunksWrapper.java
new file mode 100644
index 0000000..c0c2096
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryChunksWrapper.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.cache.dictionary;
+
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * The wrapper class wraps the List<List<byte[]>> and provides an iterator to retrieve
+ * the chunk members, and exposes a getSize API that returns the total number of
+ * members across the List<List<byte[]>> chunks.
+ * Application scenario:
+ * preparing the column sort info while writing the sort index file.
+ */
+public class DictionaryChunksWrapper implements Iterator<byte[]> {
+
+  /**
+   * list of dictionaryChunks
+   */
+  private List<List<byte[]>> dictionaryChunks;
+
+  /**
+   * total number of elements across all inner lists
+   */
+  private int size;
+
+  /**
+   * count of elements already returned by the iterator
+   */
+  private int currentIndex;
+
+  /**
+   * index within the inner list currently being traversed
+   */
+  private int iteratorIndex;
+
+  /**
+   * index of the inner list of List<List<byte[]>> currently being traversed
+   */
+  private int outerIndex;
+
+  /**
+   * Constructor of DictionaryChunksWrapper
+   *
+   * @param dictionaryChunks
+   */
+  public DictionaryChunksWrapper(List<List<byte[]>> dictionaryChunks) {
+    this.dictionaryChunks = dictionaryChunks;
+    for (List<byte[]> chunk : dictionaryChunks) {
+      this.size += chunk.size();
+    }
+  }
+
+  /**
+   * Returns {@code true} if the iteration has more elements.
+   * (In other words, returns {@code true} if {@link #next} would
+   * return an element rather than throwing an exception.)
+   *
+   * @return {@code true} if the iteration has more elements
+   */
+  @Override public boolean hasNext() {
+    return (currentIndex < size);
+  }
+
+  /**
+   * Returns the next element in the iteration.
+   * The method picks elements from the first inner list until it is exhausted,
+   * then moves on to the second inner list, and so on.
+   *
+   * @return the next element in the iteration
+   */
+  @Override public byte[] next() {
+    if (iteratorIndex >= dictionaryChunks.get(outerIndex).size()) {
+      iteratorIndex = 0;
+      outerIndex++;
+    }
+    byte[] value = dictionaryChunks.get(outerIndex).get(iteratorIndex);
+    currentIndex++;
+    iteratorIndex++;
+    return value;
+  }
+
+  /**
+   * Removes from the underlying collection the last element returned
+   * by this iterator (optional operation).  This method can be called
+   * only once per call to {@link #next}.  The behavior of an iterator
+   * is unspecified if the underlying collection is modified while the
+   * iteration is in progress in any way other than by calling this
+   * method.
+   *
+   * @throws UnsupportedOperationException if the {@code remove}
+   *                                       operation is not supported by this iterator
+   * @throws IllegalStateException         if the {@code next} method has not
+   *                                       yet been called, or the {@code remove} method has already
+   *                                       been called after the last call to the {@code next}
+   *                                       method
+   * @implSpec The default implementation throws an instance of
+   * {@link UnsupportedOperationException} and performs no other action.
+   */
+  @Override public void remove() {
+    throw new UnsupportedOperationException("Remove operation not supported");
+  }
+
+  /**
+   * returns the total number of elements across all inner lists
+   *
+   * @return total element count in the List<List<byte[]>>
+   */
+  public int getSize() {
+    return size;
+  }
+}
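
A short sketch of the flattened iteration order over hypothetical data (java.util imports assumed):

    static int iterationSketch() {
      List<List<byte[]>> chunks = new ArrayList<>();
      chunks.add(Arrays.asList(new byte[] { 1 }, new byte[] { 2 }));
      chunks.add(Collections.singletonList(new byte[] { 3 }));
      DictionaryChunksWrapper wrapper = new DictionaryChunksWrapper(chunks);
      int count = 0;
      while (wrapper.hasNext()) {
        wrapper.next();   // yields {1}, {2}, {3} in order
        count++;
      }
      return count;       // 3, the same value wrapper.getSize() reports
    }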

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryColumnUniqueIdentifier.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryColumnUniqueIdentifier.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryColumnUniqueIdentifier.java
new file mode 100644
index 0000000..c9b08dc
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryColumnUniqueIdentifier.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.carbon.ColumnIdentifier;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+
+/**
+ * dictionary column identifier which includes table identifier and column identifier
+ */
+public class DictionaryColumnUniqueIdentifier {
+
+  /**
+   * table fully qualified name
+   */
+  private CarbonTableIdentifier carbonTableIdentifier;
+
+  /**
+   * unique column id
+   */
+  private ColumnIdentifier columnIdentifier;
+
+  private DataType dataType;
+
+  /**
+   * Constructor used for the reverse dictionary cache during data loading.
+   *
+   * @param carbonTableIdentifier table identifier
+   * @param columnIdentifier      column identifier
+   */
+  public DictionaryColumnUniqueIdentifier(CarbonTableIdentifier carbonTableIdentifier,
+      ColumnIdentifier columnIdentifier) {
+    this.carbonTableIdentifier = carbonTableIdentifier;
+    this.columnIdentifier = columnIdentifier;
+  }
+
+  /**
+   * Constructor used for the forward dictionary cache during query execution.
+   *
+   * @param carbonTableIdentifier table identifier
+   * @param columnIdentifier      column identifier
+   * @param dataType              column data type
+   */
+  public DictionaryColumnUniqueIdentifier(CarbonTableIdentifier carbonTableIdentifier,
+      ColumnIdentifier columnIdentifier, DataType dataType) {
+    this(carbonTableIdentifier, columnIdentifier);
+    this.dataType = dataType;
+  }
+
+  public DataType getDataType() {
+    return dataType;
+  }
+
+  /**
+   * @return table identifier
+   */
+  public CarbonTableIdentifier getCarbonTableIdentifier() {
+    return carbonTableIdentifier;
+  }
+
+  /**
+   * @return columnIdentifier
+   */
+  public ColumnIdentifier getColumnIdentifier() {
+    return columnIdentifier;
+  }
+
+  /**
+   * overridden equals method
+   *
+   * @param other object to compare with
+   * @return true if the table and column identifiers match
+   */
+  @Override public boolean equals(Object other) {
+    if (this == other) return true;
+    if (other == null || getClass() != other.getClass()) return false;
+    DictionaryColumnUniqueIdentifier that = (DictionaryColumnUniqueIdentifier) other;
+    if (!carbonTableIdentifier.equals(that.carbonTableIdentifier)) return false;
+    return columnIdentifier.equals(that.columnIdentifier);
+
+  }
+
+  /**
+   * overridden hashCode method
+   *
+   * @return hash code derived from the table and column identifiers
+   */
+  @Override public int hashCode() {
+    int result = carbonTableIdentifier.hashCode();
+    result = 31 * result + columnIdentifier.hashCode();
+    return result;
+  }
+}
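
The two constructors map onto the two cache types. A sketch with the identifiers passed in, since their construction lies outside this excerpt; DataType.STRING is an illustrative choice:

    static DictionaryColumnUniqueIdentifier loadKey(CarbonTableIdentifier table,
        ColumnIdentifier column) {
      // reverse dictionary cache key, used during data load
      return new DictionaryColumnUniqueIdentifier(table, column);
    }

    static DictionaryColumnUniqueIdentifier queryKey(CarbonTableIdentifier table,
        ColumnIdentifier column) {
      // forward dictionary cache key, used at query time, carries the data type
      return new DictionaryColumnUniqueIdentifier(table, column, DataType.STRING);
    }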

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryInfo.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryInfo.java
new file mode 100644
index 0000000..e34860a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/DictionaryInfo.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+import java.util.List;
+
+import org.apache.carbondata.core.cache.Cacheable;
+
+/**
+ * An interface which holds dictionary information like end offset,
+ * file timestamp for one column
+ */
+public interface DictionaryInfo extends Cacheable, Dictionary {
+
+  /**
+   * This method will increment the access count for a column by 1
+   * whenever a column is getting used in query or incremental data load
+   */
+  void incrementAccessCount();
+
+  /**
+   * This method will update the end offset of the file every time the file is read
+   *
+   * @param offsetTillFileIsRead offset up to which the dictionary file has been read
+   */
+  void setOffsetTillFileIsRead(long offsetTillFileIsRead);
+
+  /**
+   * This method will update the timestamp of a file if a file is modified
+   * like in case of incremental load
+   *
+   * @param fileTimeStamp
+   */
+  void setFileTimeStamp(long fileTimeStamp);
+
+  /**
+   * This method will add a new dictionary chunk to existing list of dictionary chunks
+   *
+   * @param dictionaryChunk
+   */
+  void addDictionaryChunk(List<byte[]> dictionaryChunk);
+
+  /**
+   * This method will set the sort order index of a dictionary column.
+   * Sort order index is the index of dictionary values after they are sorted.
+   *
+   * @param sortOrderIndex sort order index list
+   */
+  void setSortOrderIndex(List<Integer> sortOrderIndex);
+
+  /**
+   * This method will set the sort reverse index of a dictionary column.
+   * Sort reverse index is the index of dictionary values before they are sorted.
+   *
+   * @param sortReverseOrderIndex
+   */
+  void setSortReverseOrderIndex(List<Integer> sortReverseOrderIndex);
+
+  /**
+   * dictionary metadata file length which will be set whenever we reload dictionary
+   * data from disk
+   *
+   * @param dictionaryMetaFileLength length of dictionary metadata file
+   */
+  void setDictionaryMetaFileLength(long dictionaryMetaFileLength);
+
+  /**
+   * Dictionary metadata file length, which is compared against the current file
+   * length to check whether the dictionary meta file has been modified
+   *
+   * @return dictionary metadata file length
+   */
+  long getDictionaryMetaFileLength();
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionary.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionary.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionary.java
new file mode 100644
index 0000000..bddab56
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionary.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+/**
+ * This class wraps a column's dictionary info and is used primarily for
+ * surrogate key to value look up at query time
+ */
+public class ForwardDictionary implements Dictionary {
+
+  /**
+   * Object which will hold the information related to this dictionary column
+   */
+  private ColumnDictionaryInfo columnDictionaryInfo;
+
+  /**
+   * @param columnDictionaryInfo
+   */
+  public ForwardDictionary(ColumnDictionaryInfo columnDictionaryInfo) {
+    this.columnDictionaryInfo = columnDictionaryInfo;
+  }
+
+  /**
+   * This method will find and return the surrogate key for a given dictionary value.
+   * Applicable scenarios:
+   * 1. Incremental data load: the dictionary is not regenerated for existing values,
+   * so incoming values have to be looked up in the existing dictionary cache.
+   * 2. Filter queries where the surrogate key has to be found for a given value.
+   *
+   * @param value dictionary value
+   * @return the surrogate key if found, else 0
+   */
+  @Override public int getSurrogateKey(String value) {
+    return columnDictionaryInfo.getSurrogateKey(value);
+  }
+
+  /**
+   * This method will find and return the surrogate key for a given dictionary value.
+   * Applicable scenarios:
+   * 1. Incremental data load: the dictionary is not regenerated for existing values,
+   * so incoming values have to be looked up in the existing dictionary cache.
+   * 2. Filter queries where the surrogate key has to be found for a given value.
+   *
+   * @param value dictionary value as byte array
+   * @return the surrogate key if found, else 0
+   */
+  @Override public int getSurrogateKey(byte[] value) {
+    return columnDictionaryInfo.getSurrogateKey(value);
+  }
+
+  /**
+   * This method will find and return the dictionary value for a given surrogate key.
+   * Applicable scenario:
+   * 1. Query final result preparation: this method is used to translate
+   * surrogate keys in the result back to the original dictionary values
+   *
+   * @param surrogateKey a unique ID for a dictionary value
+   * @return value if found else null
+   */
+  @Override public String getDictionaryValueForKey(int surrogateKey) {
+    return columnDictionaryInfo.getDictionaryValueForKey(surrogateKey);
+  }
+
+  /**
+   * This method will find and return the sort index for a given dictionary id.
+   * Applicable scenario:
+   * 1. Order by queries where data sorting is required
+   *
+   * @param surrogateKey a unique ID for a dictionary value
+   * @return the sort index if found, else 0
+   */
+  @Override public int getSortedIndex(int surrogateKey) {
+    return columnDictionaryInfo.getSortedIndex(surrogateKey);
+  }
+
+  /**
+   * This method will find and return the dictionary value for a given sort index.
+   * Applicable scenario:
+   * 1. Query final result preparation for order by queries: this method is used to
+   * translate surrogate keys in the result back to the original dictionary values
+   *
+   * @param sortedIndex sort index of dictionary value
+   * @return value if found else null
+   */
+  @Override public String getDictionaryValueFromSortedIndex(int sortedIndex) {
+    return columnDictionaryInfo.getDictionaryValueFromSortedIndex(sortedIndex);
+  }
+
+  /**
+   * This method returns the dictionary chunks wrapper of a column.
+   * The wrapper wraps the List<List<byte[]>> and provides an iterator to retrieve
+   * the chunk members.
+   * Application scenario:
+   * preparing the column sort info while writing the sort index file.
+   *
+   * @return dictionary chunks wrapper
+   */
+  @Override public DictionaryChunksWrapper getDictionaryChunks() {
+    return columnDictionaryInfo.getDictionaryChunks();
+  }
+
+  /**
+   * This method will release the objects and set default value for primitive types
+   */
+  @Override public void clear() {
+    if (null != columnDictionaryInfo) {
+      columnDictionaryInfo.clear();
+      columnDictionaryInfo = null;
+    }
+  }
+
+  /**
+   * This method will collect the surrogate keys for the given filter member
+   * values using incremental search in the dictionary.
+   *
+   * @param evaluateResultList filter member values to look up
+   * @param surrogates         output list to which matched surrogate keys are added
+   */
+  public void getSurrogateKeyByIncrementalSearch(List<String> evaluateResultList,
+      List<Integer> surrogates) {
+    List<byte[]> byteValuesOfFilterMembers = new ArrayList<byte[]>(evaluateResultList.size());
+    byte[] keyData = null;
+    for (int i = 0; i < evaluateResultList.size(); i++) {
+      keyData = evaluateResultList.get(i)
+          .getBytes(Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
+      byteValuesOfFilterMembers.add(keyData);
+    }
+
+    columnDictionaryInfo
+        .getIncrementalSurrogateKeyFromDictionary(byteValuesOfFilterMembers, surrogates);
+  }
+}
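
A sketch of the query-side decode path; the ColumnDictionaryInfo instance is assumed to have been loaded already by the forward dictionary cache:

    static String decodeSketch(ColumnDictionaryInfo loadedInfo, int surrogateKey) {
      Dictionary forward = new ForwardDictionary(loadedInfo);
      // translate the surrogate back to its original value; null if the key is unknown
      return forward.getDictionaryValueForKey(surrogateKey);
    }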

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionaryCache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionaryCache.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionaryCache.java
new file mode 100644
index 0000000..d4f6adc
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionaryCache.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.cache.CacheType;
+import org.apache.carbondata.core.cache.CarbonLRUCache;
+import org.apache.carbondata.core.util.CarbonUtilException;
+
+/**
+ * This class implements methods to create dictionary cache which will hold
+ * dictionary chunks for look up of surrogate keys and values
+ */
+public class ForwardDictionaryCache<K extends DictionaryColumnUniqueIdentifier,
+                                    V extends Dictionary>
+    extends AbstractDictionaryCache<K, V> {
+
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(ForwardDictionaryCache.class.getName());
+
+  /**
+   * @param carbonStorePath
+   * @param carbonLRUCache
+   */
+  public ForwardDictionaryCache(String carbonStorePath, CarbonLRUCache carbonLRUCache) {
+    super(carbonStorePath, carbonLRUCache);
+  }
+
+  /**
+   * This method will get the value for the given key. If value does not exist
+   * for the given key, it will check and load the value.
+   *
+   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
+   *                                         tableName and columnIdentifier
+   * @return dictionary
+   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
+   */
+  @Override public Dictionary get(DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier)
+      throws CarbonUtilException {
+    return getDictionary(dictionaryColumnUniqueIdentifier);
+  }
+
+  /**
+   * This method will return a list of values for the given list of keys.
+   * For each key, this method will check and load the data if required.
+   *
+   * @param dictionaryColumnUniqueIdentifiers unique identifier which contains dbName,
+   *                                          tableName and columnIdentifier
+   * @return list of dictionary
+   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
+   */
+  @Override public List<Dictionary> getAll(
+      List<DictionaryColumnUniqueIdentifier> dictionaryColumnUniqueIdentifiers)
+      throws CarbonUtilException {
+    boolean exceptionOccurredInDictionaryLoading = false;
+    String exceptionMessage = "";
+    List<Dictionary> forwardDictionaryObjectList =
+        new ArrayList<Dictionary>(dictionaryColumnUniqueIdentifiers.size());
+    List<Future<Dictionary>> taskSubmitList =
+        new ArrayList<>(dictionaryColumnUniqueIdentifiers.size());
+    ExecutorService executorService = Executors.newFixedThreadPool(thread_pool_size);
+    for (final DictionaryColumnUniqueIdentifier uniqueIdent : dictionaryColumnUniqueIdentifiers) {
+      taskSubmitList.add(executorService.submit(new Callable<Dictionary>() {
+        @Override public Dictionary call() throws CarbonUtilException {
+          Dictionary dictionary = getDictionary(uniqueIdent);
+          return dictionary;
+        }
+      }));
+    }
+    try {
+      executorService.shutdown();
+      executorService.awaitTermination(2, TimeUnit.HOURS);
+    } catch (InterruptedException e) {
+      LOGGER.error("Error loading the dictionary: " + e.getMessage());
+    }
+    for (int i = 0; i < taskSubmitList.size(); i++) {
+      try {
+        Dictionary columnDictionary = taskSubmitList.get(i).get();
+        forwardDictionaryObjectList.add(columnDictionary);
+      } catch (Throwable e) {
+        exceptionOccurredInDictionaryLoading = true;
+        exceptionMessage = e.getMessage();
+      }
+    }
+    if (exceptionOccurredInDictionaryLoading) {
+      clearDictionary(forwardDictionaryObjectList);
+      LOGGER.error(exceptionMessage);
+      throw new CarbonUtilException(exceptionMessage);
+    }
+    return forwardDictionaryObjectList;
+  }
+
+  /**
+   * This method will return the value for the given key. It will not check and load
+   * the data for the given key
+   *
+   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
+   *                                         tableName and columnIdentifier
+   * @return dictionary if present in the cache, else null
+   */
+  @Override public Dictionary getIfPresent(
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
+    Dictionary forwardDictionary = null;
+    ColumnDictionaryInfo columnDictionaryInfo = (ColumnDictionaryInfo) carbonLRUCache.get(
+        getLruCacheKey(dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId(),
+            CacheType.FORWARD_DICTIONARY));
+    if (null != columnDictionaryInfo) {
+      forwardDictionary = new ForwardDictionary(columnDictionaryInfo);
+      incrementDictionaryAccessCount(columnDictionaryInfo);
+    }
+    return forwardDictionary;
+  }
+
+  /**
+   * This method will remove the cache for a given key
+   *
+   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
+   *                                         tableName and columnIdentifier
+   */
+  @Override public void invalidate(
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
+    carbonLRUCache.remove(
+        getLruCacheKey(dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId(),
+            CacheType.FORWARD_DICTIONARY));
+  }
+
+  /**
+   * This method will get the value for the given key. If value does not exist
+   * for the given key, it will check and load the value.
+   *
+   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
+   *                                         tableName and columnIdentifier
+   * @return dictionary
+   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
+   */
+  private Dictionary getDictionary(
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier)
+      throws CarbonUtilException {
+    Dictionary forwardDictionary = null;
+    // create column dictionary info object only if dictionary and its
+    // metadata file exists for a given column identifier
+    if (!isFileExistsForGivenColumn(dictionaryColumnUniqueIdentifier)) {
+      throw new CarbonUtilException(
+          "Either dictionary or its metadata does not exist for column identifier :: "
+              + dictionaryColumnUniqueIdentifier.getColumnIdentifier());
+    }
+    String columnIdentifier = dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId();
+    ColumnDictionaryInfo columnDictionaryInfo =
+        getColumnDictionaryInfo(dictionaryColumnUniqueIdentifier, columnIdentifier);
+    // load sort index file in case of forward dictionary
+    checkAndLoadDictionaryData(dictionaryColumnUniqueIdentifier, columnDictionaryInfo,
+        getLruCacheKey(dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId(),
+            CacheType.FORWARD_DICTIONARY), true);
+    forwardDictionary = new ForwardDictionary(columnDictionaryInfo);
+    return forwardDictionary;
+  }
+
+  /**
+   * This method will check and create a columnDictionaryInfo object for the given column
+   *
+   * @param dictionaryColumnUniqueIdentifier unique column identifier
+   * @param columnIdentifier                 column id
+   * @return column dictionary info, from the LRU cache if present, else newly created
+   */
+  private ColumnDictionaryInfo getColumnDictionaryInfo(
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier, String columnIdentifier) {
+    ColumnDictionaryInfo columnDictionaryInfo = (ColumnDictionaryInfo) carbonLRUCache
+        .get(getLruCacheKey(columnIdentifier, CacheType.FORWARD_DICTIONARY));
+    if (null == columnDictionaryInfo) {
+      synchronized (dictionaryColumnUniqueIdentifier) {
+        columnDictionaryInfo = (ColumnDictionaryInfo) carbonLRUCache
+            .get(getLruCacheKey(columnIdentifier, CacheType.FORWARD_DICTIONARY));
+        if (null == columnDictionaryInfo) {
+          columnDictionaryInfo =
+              new ColumnDictionaryInfo(dictionaryColumnUniqueIdentifier.getDataType());
+        }
+      }
+    }
+    return columnDictionaryInfo;
+  }
+}
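
Callers that want to avoid triggering disk reads can probe the cache first and fall back to a loading get. A sketch; the cache instance would normally be obtained from the cache layer, which is outside this excerpt:

    static Dictionary getSketch(
        ForwardDictionaryCache<DictionaryColumnUniqueIdentifier, Dictionary> cache,
        DictionaryColumnUniqueIdentifier identifier) throws CarbonUtilException {
      Dictionary dictionary = cache.getIfPresent(identifier); // cheap: memory only
      if (dictionary == null) {
        dictionary = cache.get(identifier);                   // loads from the dictionary files
      }
      return dictionary;
    }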

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionary.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionary.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionary.java
new file mode 100644
index 0000000..1f8a3ff
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionary.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+/**
+ * This class wraps a column's reverse dictionary info and is used primarily for
+ * value to surrogate key look up during data load
+ */
+public class ReverseDictionary implements Dictionary {
+
+  /**
+   * Object which will hold the information related to this dictionary column
+   */
+  private ColumnReverseDictionaryInfo columnReverseDictionaryInfo;
+
+  /**
+   * @param columnReverseDictionaryInfo
+   */
+  public ReverseDictionary(ColumnReverseDictionaryInfo columnReverseDictionaryInfo) {
+    this.columnReverseDictionaryInfo = columnReverseDictionaryInfo;
+  }
+
+  /**
+   * This method will find and return the surrogate key for a given dictionary value.
+   * Applicable scenarios:
+   * 1. Incremental data load: the dictionary is not regenerated for existing values,
+   * so incoming values have to be looked up in the existing dictionary cache.
+   * 2. Filter queries where the surrogate key has to be found for a given value.
+   *
+   * @param value dictionary value
+   * @return the surrogate key if found, else 0
+   */
+  @Override public int getSurrogateKey(String value) {
+    return columnReverseDictionaryInfo.getSurrogateKey(value);
+  }
+
+  /**
+   * This method will find and return the surrogate key for a given dictionary value.
+   * Applicable scenarios:
+   * 1. Incremental data load: the dictionary is not regenerated for existing values,
+   * so incoming values have to be looked up in the existing dictionary cache.
+   * 2. Filter queries where the surrogate key has to be found for a given value.
+   *
+   * @param value dictionary value as byte array
+   * @return the surrogate key if found, else CarbonCommonConstants.INVALID_SURROGATE_KEY
+   */
+  @Override public int getSurrogateKey(byte[] value) {
+    return columnReverseDictionaryInfo.getSurrogateKey(value);
+  }
+
+  /**
+   * This method will find and return the dictionary value for a given surrogate key.
+   * Applicable scenario:
+   * 1. Query final result preparation: this method is used to translate
+   * surrogate keys in the result back to the original dictionary values
+   *
+   * @param surrogateKey a unique ID for a dictionary value
+   * @return value if found else null
+   */
+  @Override public String getDictionaryValueForKey(int surrogateKey) {
+    return columnReverseDictionaryInfo.getDictionaryValueForKey(surrogateKey);
+  }
+
+  /**
+   * This method will find and return the sort index for a given dictionary id.
+   * Applicable scenario:
+   * 1. Order by queries where data sorting is required
+   *
+   * @param surrogateKey a unique ID for a dictionary value
+   * @return the sort index if found, else 0
+   */
+  @Override public int getSortedIndex(int surrogateKey) {
+    return columnReverseDictionaryInfo.getSortedIndex(surrogateKey);
+  }
+
+  /**
+   * This method will find and return the dictionary value for a given sort index.
+   * Applicable scenario:
+   * 1. Query final result preparation for order by queries: this method is used to
+   * translate surrogate keys in the result back to the original dictionary values
+   *
+   * @param sortedIndex sort index of dictionary value
+   * @return value if found else null
+   */
+  @Override public String getDictionaryValueFromSortedIndex(int sortedIndex) {
+    return columnReverseDictionaryInfo.getDictionaryValueFromSortedIndex(sortedIndex);
+  }
+
+  /**
+   * This method returns the dictionary chunks wrapper of a column.
+   * The wrapper wraps the List<List<byte[]>> and provides an iterator to retrieve
+   * the chunk members.
+   * Application scenario:
+   * preparing the column sort info while writing the sort index file.
+   *
+   * @return dictionary chunks wrapper
+   */
+  @Override public DictionaryChunksWrapper getDictionaryChunks() {
+    return columnReverseDictionaryInfo.getDictionaryChunks();
+  }
+
+  /**
+   * This method will release the objects and set default value for primitive types
+   */
+  @Override public void clear() {
+    if (null != columnReverseDictionaryInfo) {
+      columnReverseDictionaryInfo.clear();
+      columnReverseDictionaryInfo = null;
+    }
+  }
+
+}
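
During incremental load the not-found return value is the signal that a new surrogate must be generated; a sketch grounded in the look up shown above:

    static boolean isKnownValue(ReverseDictionary reverse, byte[] incomingValue) {
      int key = reverse.getSurrogateKey(incomingValue);
      // INVALID_SURROGATE_KEY means the value has no dictionary entry yet
      return key != CarbonCommonConstants.INVALID_SURROGATE_KEY;
    }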

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionaryCache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionaryCache.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionaryCache.java
new file mode 100644
index 0000000..aa05570
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionaryCache.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.cache.dictionary;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.cache.CacheType;
+import org.apache.carbondata.core.cache.CarbonLRUCache;
+import org.apache.carbondata.core.util.CarbonUtilException;
+
+/**
+ * This class implements methods to create dictionary cache which will hold
+ * dictionary chunks for look up of surrogate keys and values
+ */
+public class ReverseDictionaryCache<K extends DictionaryColumnUniqueIdentifier,
+    V extends Dictionary>
+    extends AbstractDictionaryCache<K, V> {
+
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(ReverseDictionaryCache.class.getName());
+
+  /**
+   * @param carbonStorePath
+   * @param carbonLRUCache
+   */
+  public ReverseDictionaryCache(String carbonStorePath, CarbonLRUCache carbonLRUCache) {
+    super(carbonStorePath, carbonLRUCache);
+  }
+
+  /**
+   * This method will get the value for the given key. If value does not exist
+   * for the given key, it will check and load the value.
+   *
+   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
+   *                                         tableName and columnIdentifier
+   * @return dictionary
+   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
+   */
+  @Override public Dictionary get(DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier)
+      throws CarbonUtilException {
+    return getDictionary(dictionaryColumnUniqueIdentifier);
+  }
+
+  /**
+   * This method will return a list of values for the given list of keys.
+   * For each key, this method will check and load the data if required.
+   *
+   * @param dictionaryColumnUniqueIdentifiers list of unique identifiers, each containing
+   *                                          dbName, tableName and columnIdentifier
+   * @return list of dictionaries
+   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
+   */
+  @Override public List<Dictionary> getAll(
+      List<DictionaryColumnUniqueIdentifier> dictionaryColumnUniqueIdentifiers)
+      throws CarbonUtilException {
+    boolean exceptionOccurredInDictionaryLoading = false;
+    String exceptionMessage = "";
+    List<Dictionary> reverseDictionaryObjectList =
+        new ArrayList<Dictionary>(dictionaryColumnUniqueIdentifiers.size());
+    List<Future<Dictionary>> taskSubmitList =
+        new ArrayList<>(dictionaryColumnUniqueIdentifiers.size());
+    ExecutorService executorService = Executors.newFixedThreadPool(thread_pool_size);
+    for (final DictionaryColumnUniqueIdentifier uniqueIdent : dictionaryColumnUniqueIdentifiers) {
+      taskSubmitList.add(executorService.submit(new Callable<Dictionary>() {
+        @Override public Dictionary call() throws CarbonUtilException {
+          return getDictionary(uniqueIdent);
+        }
+      }));
+    }
+    try {
+      executorService.shutdown();
+      executorService.awaitTermination(2, TimeUnit.HOURS);
+    } catch (InterruptedException e) {
+      LOGGER.error("Error loading the dictionary: " + e.getMessage());
+    }
+    for (int i = 0; i < taskSubmitList.size(); i++) {
+      try {
+        Dictionary columnDictionary = taskSubmitList.get(i).get();
+        reverseDictionaryObjectList.add(columnDictionary);
+      } catch (Throwable e) {
+        exceptionOccurredInDictionaryLoading = true;
+        exceptionMessage = e.getMessage();
+      }
+    }
+    if (exceptionOccurredInDictionaryLoading) {
+      clearDictionary(reverseDictionaryObjectList);
+      LOGGER.error(exceptionMessage);
+      throw new CarbonUtilException(exceptionMessage);
+    }
+    return reverseDictionaryObjectList;
+  }
+
+  /**
+   * This method will return the value for the given key. It will not check and load
+   * the data for the given key
+   *
+   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
+   *                                         tableName and columnIdentifier
+   * @return dictionary if it is already present in the cache, null otherwise
+   */
+  @Override public Dictionary getIfPresent(
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
+    Dictionary reverseDictionary = null;
+    ColumnReverseDictionaryInfo columnReverseDictionaryInfo =
+        (ColumnReverseDictionaryInfo) carbonLRUCache.get(
+            getLruCacheKey(dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId(),
+                CacheType.REVERSE_DICTIONARY));
+    if (null != columnReverseDictionaryInfo) {
+      reverseDictionary = new ReverseDictionary(columnReverseDictionaryInfo);
+      incrementDictionaryAccessCount(columnReverseDictionaryInfo);
+    }
+    return reverseDictionary;
+  }
+
+  /**
+   * This method will remove the cache for a given key
+   *
+   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
+   *                                         tableName and columnIdentifier
+   */
+  @Override public void invalidate(
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
+    carbonLRUCache.remove(
+        getLruCacheKey(dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId(),
+            CacheType.REVERSE_DICTIONARY));
+  }
+
+  /**
+   * This method will get the value for the given key. If value does not exist
+   * for the given key, it will check and load the value.
+   *
+   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
+   *                                         tableName and columnIdentifier
+   * @return dictionary
+   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
+   */
+  private Dictionary getDictionary(
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier)
+      throws CarbonUtilException {
+    Dictionary reverseDictionary = null;
+    // create column dictionary info object only if dictionary and its
+    // metadata file exists for a given column identifier
+    if (!isFileExistsForGivenColumn(dictionaryColumnUniqueIdentifier)) {
+      throw new CarbonUtilException(
+          "Either dictionary or its metadata does not exist for column identifier :: "
+              + dictionaryColumnUniqueIdentifier.getColumnIdentifier());
+    }
+    String columnIdentifier = dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId();
+    ColumnReverseDictionaryInfo columnReverseDictionaryInfo =
+        getColumnReverseDictionaryInfo(dictionaryColumnUniqueIdentifier, columnIdentifier);
+    // do not load sort index file for reverse dictionary
+    checkAndLoadDictionaryData(dictionaryColumnUniqueIdentifier, columnReverseDictionaryInfo,
+        getLruCacheKey(dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId(),
+            CacheType.REVERSE_DICTIONARY), false);
+    reverseDictionary = new ReverseDictionary(columnReverseDictionaryInfo);
+    return reverseDictionary;
+  }
+
+  /**
+   * This method will check and create columnReverseDictionaryInfo object for the given column
+   *
+   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
+   *                                         tableName and columnIdentifier
+   * @param columnIdentifier                 column id used as the LRU cache key
+   * @return columnReverseDictionaryInfo, created if not already present in the cache
+   */
+  private ColumnReverseDictionaryInfo getColumnReverseDictionaryInfo(
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier, String columnIdentifier) {
+    ColumnReverseDictionaryInfo columnReverseDictionaryInfo =
+        (ColumnReverseDictionaryInfo) carbonLRUCache
+            .get(getLruCacheKey(columnIdentifier, CacheType.REVERSE_DICTIONARY));
+    if (null == columnReverseDictionaryInfo) {
+      synchronized (dictionaryColumnUniqueIdentifier) {
+        columnReverseDictionaryInfo = (ColumnReverseDictionaryInfo) carbonLRUCache
+            .get(getLruCacheKey(columnIdentifier, CacheType.REVERSE_DICTIONARY));
+        if (null == columnReverseDictionaryInfo) {
+          columnReverseDictionaryInfo = new ColumnReverseDictionaryInfo();
+        }
+      }
+    }
+    return columnReverseDictionaryInfo;
+  }
+}
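
The getAll() method above fans per-column dictionary loading out to a fixed
thread pool, waits for the pool to drain, and fails the whole batch if any
single column failed to load (clearing whatever was already loaded). A
standalone sketch of that pattern, where loadColumn() is a hypothetical
stand-in for getDictionary():

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public final class ParallelLoadSketch {

  // hypothetical stand-in for getDictionary(dictionaryColumnUniqueIdentifier)
  private static String loadColumn(String columnId) {
    return "dictionary-of-" + columnId;
  }

  public static List<String> loadAll(List<String> columnIds) throws Exception {
    ExecutorService executorService = Executors.newFixedThreadPool(4);
    List<Future<String>> futures = new ArrayList<Future<String>>(columnIds.size());
    for (final String columnId : columnIds) {
      futures.add(executorService.submit(new Callable<String>() {
        @Override public String call() {
          return loadColumn(columnId);
        }
      }));
    }
    // stop accepting new tasks, then wait for the submitted ones to finish
    executorService.shutdown();
    executorService.awaitTermination(2, TimeUnit.HOURS);
    List<String> loaded = new ArrayList<String>(columnIds.size());
    for (Future<String> future : futures) {
      loaded.add(future.get()); // propagates the first per-column failure
    }
    return loaded;
  }
}

Note also that getColumnReverseDictionaryInfo() re-checks the LRU cache inside
a synchronized block, so two threads racing on the same column do not both
create a ColumnReverseDictionaryInfo.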

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/AbsoluteTableIdentifier.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/AbsoluteTableIdentifier.java b/core/src/main/java/org/apache/carbondata/core/carbon/AbsoluteTableIdentifier.java
new file mode 100644
index 0000000..c8f603a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/AbsoluteTableIdentifier.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon;
+
+import java.io.Serializable;
+
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+
+/**
+ * identifier which holds the store path and the carbon table identifier
+ */
+public class AbsoluteTableIdentifier implements Serializable {
+
+  /**
+   * serializable version
+   */
+  private static final long serialVersionUID = 4695047103484427506L;
+
+  /**
+   * path of the store
+   */
+  private String storePath;
+
+  /**
+   * carbon table identifier which will have table name and table database
+   * name
+   */
+  private CarbonTableIdentifier carbonTableIdentifier;
+
+  public AbsoluteTableIdentifier(String storePath, CarbonTableIdentifier carbonTableIdentifier) {
+    //TODO this should be moved to common place where path handling will be handled
+    this.storePath = FileFactory.getUpdatedFilePath(storePath);
+    this.carbonTableIdentifier = carbonTableIdentifier;
+  }
+
+  /**
+   * @return the storePath
+   */
+  public String getStorePath() {
+    return storePath;
+  }
+
+  /**
+   * @return the carbonTableIdentifier
+   */
+  public CarbonTableIdentifier getCarbonTableIdentifier() {
+    return carbonTableIdentifier;
+  }
+
+  /**
+   * to get the hash code
+   */
+  @Override public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result =
+        prime * result + ((carbonTableIdentifier == null) ? 0 : carbonTableIdentifier.hashCode());
+    result = prime * result + ((storePath == null) ? 0 : storePath.hashCode());
+    return result;
+  }
+
+  /**
+   * to check this class is equal to
+   * other object passed
+   *
+   * @param obj other object
+   */
+  @Override public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (!(obj instanceof AbsoluteTableIdentifier)) {
+      return false;
+    }
+    AbsoluteTableIdentifier other = (AbsoluteTableIdentifier) obj;
+    if (carbonTableIdentifier == null) {
+      if (other.carbonTableIdentifier != null) {
+        return false;
+      }
+    } else if (!carbonTableIdentifier.equals(other.carbonTableIdentifier)) {
+      return false;
+    }
+    if (storePath == null) {
+      if (other.storePath != null) {
+        return false;
+      }
+    } else if (!storePath.equals(other.storePath)) {
+      return false;
+    }
+    return true;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/CarbonDataLoadSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/CarbonDataLoadSchema.java b/core/src/main/java/org/apache/carbondata/core/carbon/CarbonDataLoadSchema.java
new file mode 100644
index 0000000..7cfefc9
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/CarbonDataLoadSchema.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.metadata.schema.table.CarbonTable;
+
+/**
+ * Wrapper data load schema object which will be used to
+ * support relations between tables while loading data
+ */
+public class CarbonDataLoadSchema implements Serializable {
+
+  /**
+   * default serializer
+   */
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * CarbonTable info
+   */
+  private CarbonTable carbonTable;
+
+  /**
+   * dimension table and relation info
+   */
+  private List<DimensionRelation> dimensionRelationList;
+
+  /**
+   * CarbonDataLoadSchema constructor which takes CarbonTable
+   *
+   * @param carbonTable
+   */
+  public CarbonDataLoadSchema(CarbonTable carbonTable) {
+    this.carbonTable = carbonTable;
+    this.dimensionRelationList = new ArrayList<DimensionRelation>();
+  }
+
+  /**
+   * get dimension relation list
+   *
+   * @return dimensionRelationList
+   */
+  public List<DimensionRelation> getDimensionRelationList() {
+    return dimensionRelationList;
+  }
+
+  /**
+   * set dimension relation list
+   *
+   * @param dimensionRelationList
+   */
+  public void setDimensionRelationList(List<DimensionRelation> dimensionRelationList) {
+    this.dimensionRelationList = dimensionRelationList;
+  }
+
+  /**
+   * get carbontable
+   *
+   * @return carbonTable
+   */
+  public CarbonTable getCarbonTable() {
+    return carbonTable;
+  }
+
+  /**
+   * Dimension Relation object which will be filled from
+   * Load DML Command to support normalized table data load
+   */
+  public static class DimensionRelation implements Serializable {
+    /**
+     * default serializer
+     */
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * dimension tableName
+     */
+    private String tableName;
+
+    /**
+     * dimensionSource csv path
+     */
+    private String dimensionSource;
+
+    /**
+     * relation with fact and dimension table
+     */
+    private Relation relation;
+
+    /**
+     * Columns to be selected from the dimension table.
+     * The in-memory hierarchy table should be prepared
+     * based on the selected columns
+     */
+    private List<String> columns;
+
+    /**
+     * constructor
+     *
+     * @param tableName       - dimension table name
+     * @param dimensionSource - source file path
+     * @param relation        - fact foreign key with dimension primary key mapping
+     * @param columns         - list of columns to be used from this dimension table
+     */
+    public DimensionRelation(String tableName, String dimensionSource, Relation relation,
+        List<String> columns) {
+      this.tableName = tableName;
+      this.dimensionSource = dimensionSource;
+      this.relation = relation;
+      this.columns = columns;
+    }
+
+    /**
+     * @return tableName
+     */
+    public String getTableName() {
+      return tableName;
+    }
+
+    /**
+     * @return dimensionSource
+     */
+    public String getDimensionSource() {
+      return dimensionSource;
+    }
+
+    /**
+     * @return relation
+     */
+    public Relation getRelation() {
+      return relation;
+    }
+
+    /**
+     * @return columns
+     */
+    public List<String> getColumns() {
+      return columns;
+    }
+  }
+
+  /**
+   * Relation class to map a fact foreign key column to a
+   * dimension primary key column
+   */
+  public static class Relation implements Serializable {
+    /**
+     * default serializer
+     */
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Fact foreign key column
+     */
+    private String factForeignKeyColumn;
+
+    /**
+     * dimension primary key column
+     */
+    private String dimensionPrimaryKeyColumn;
+
+    /**
+     * constructor
+     *
+     * @param factForeignKeyColumn      - Fact Table Foreign key
+     * @param dimensionPrimaryKeyColumn - Dimension Table primary key
+     */
+    public Relation(String factForeignKeyColumn, String dimensionPrimaryKeyColumn) {
+      this.factForeignKeyColumn = factForeignKeyColumn;
+      this.dimensionPrimaryKeyColumn = dimensionPrimaryKeyColumn;
+    }
+
+    /**
+     * @return factForeignKeyColumn
+     */
+    public String getFactForeignKeyColumn() {
+      return factForeignKeyColumn;
+    }
+
+    /**
+     * @return dimensionPrimaryKeyColumn
+     */
+    public String getDimensionPrimaryKeyColumn() {
+      return dimensionPrimaryKeyColumn;
+    }
+  }
+}
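
As a hedged illustration only, the Relation and DimensionRelation classes
above could be wired up as follows to describe a fact table whose cust_id
column joins a customer dimension loaded from a CSV file (the table object,
column names and path are hypothetical):

import java.util.Arrays;

import org.apache.carbondata.core.carbon.CarbonDataLoadSchema;
import org.apache.carbondata.core.carbon.CarbonDataLoadSchema.DimensionRelation;
import org.apache.carbondata.core.carbon.CarbonDataLoadSchema.Relation;
import org.apache.carbondata.core.carbon.metadata.schema.table.CarbonTable;

public final class LoadSchemaExample {

  public static CarbonDataLoadSchema build(CarbonTable factTable) {
    CarbonDataLoadSchema schema = new CarbonDataLoadSchema(factTable);
    // fact.cust_id (foreign key) joins customer.id (primary key)
    Relation join = new Relation("cust_id", "id");
    DimensionRelation customer = new DimensionRelation(
        "customer",                         // dimension table name
        "hdfs://nn/data/customer.csv",      // dimension source csv path
        join,
        Arrays.asList("id", "name", "city") // columns used from the dimension
    );
    schema.setDimensionRelationList(Arrays.asList(customer));
    return schema;
  }
}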

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/CarbonTableIdentifier.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/CarbonTableIdentifier.java b/core/src/main/java/org/apache/carbondata/core/carbon/CarbonTableIdentifier.java
new file mode 100644
index 0000000..bb8a816
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/CarbonTableIdentifier.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.carbon;
+
+import java.io.Serializable;
+
+/**
+ * Identifier class which will hold the table qualified name
+ */
+public class CarbonTableIdentifier implements Serializable {
+
+  /**
+   * database name
+   */
+  private String databaseName;
+
+  /**
+   * table name
+   */
+  private String tableName;
+
+  /**
+   * table id
+   */
+  private String tableId;
+
+  /**
+   * constructor
+   */
+  public CarbonTableIdentifier(String databaseName, String tableName, String tableId) {
+    this.databaseName = databaseName;
+    this.tableName = tableName;
+    this.tableId = tableId;
+  }
+
+  /**
+   * return database name
+   */
+  public String getDatabaseName() {
+    return databaseName;
+  }
+
+  /**
+   * return table name
+   */
+  public String getTableName() {
+    return tableName;
+  }
+
+  /**
+   * @return tableId
+   */
+  public String getTableId() {
+    return tableId;
+  }
+
+  /**
+   * @return table unique name
+   */
+  public String getTableUniqueName() {
+    return databaseName + '_' + tableName;
+  }
+
+  @Override public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((databaseName == null) ? 0 : databaseName.hashCode());
+    result = prime * result + ((tableId == null) ? 0 : tableId.hashCode());
+    result = prime * result + ((tableName == null) ? 0 : tableName.hashCode());
+    return result;
+  }
+
+  @Override public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    CarbonTableIdentifier other = (CarbonTableIdentifier) obj;
+    if (databaseName == null) {
+      if (other.databaseName != null) {
+        return false;
+      }
+    } else if (!databaseName.equals(other.databaseName)) {
+      return false;
+    }
+    if (tableId == null) {
+      if (other.tableId != null) {
+        return false;
+      }
+    } else if (!tableId.equals(other.tableId)) {
+      return false;
+    }
+    if (tableName == null) {
+      if (other.tableName != null) {
+        return false;
+      }
+    } else if (!tableName.equals(other.tableName)) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * @return table unique name
+   */
+  @Override public String toString() {
+    return databaseName + '_' + tableName;
+  }
+}
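
Together with AbsoluteTableIdentifier shown earlier, a fully qualified table
reference can be sketched like this (the store path and names are made up for
the example):

import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
import org.apache.carbondata.core.carbon.CarbonTableIdentifier;

public final class IdentifierExample {

  public static void main(String[] args) {
    CarbonTableIdentifier table = new CarbonTableIdentifier("default", "sales", "1");
    AbsoluteTableIdentifier absolute =
        new AbsoluteTableIdentifier("hdfs://nn/carbon/store", table);
    System.out.println(table.getTableUniqueName()); // prints: default_sales
    // store path as kept by the identifier (possibly normalized by FileFactory)
    System.out.println(absolute.getStorePath());
  }
}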

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/ColumnIdentifier.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/ColumnIdentifier.java b/core/src/main/java/org/apache/carbondata/core/carbon/ColumnIdentifier.java
new file mode 100644
index 0000000..9dda92c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/ColumnIdentifier.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon;
+
+import java.io.Serializable;
+import java.util.Map;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+
+/**
+ * Column unique identifier
+ */
+public class ColumnIdentifier implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * column id
+   */
+  private String columnId;
+
+  /**
+   * column properties
+   */
+  private Map<String, String> columnProperties;
+
+  private DataType dataType;
+
+  /**
+   * @param columnId         column id
+   * @param columnProperties column properties map
+   * @param dataType         data type of the column
+   */
+  public ColumnIdentifier(String columnId, Map<String, String> columnProperties,
+      DataType dataType) {
+    this.columnId = columnId;
+    this.columnProperties = columnProperties;
+    this.dataType = dataType;
+  }
+
+  /**
+   * @return columnId
+   */
+  public String getColumnId() {
+    return columnId;
+  }
+
+  /**
+   * @param columnProperty property name to look up
+   * @return property value if present, null otherwise
+   */
+  public String getColumnProperty(String columnProperty) {
+    if (null != columnProperties) {
+      return columnProperties.get(columnProperty);
+    }
+    return null;
+  }
+
+  public DataType getDataType() {
+    return this.dataType;
+  }
+
+  @Override public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((columnId == null) ? 0 : columnId.hashCode());
+    return result;
+  }
+
+  @Override public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    ColumnIdentifier other = (ColumnIdentifier) obj;
+    if (columnId == null) {
+      if (other.columnId != null) {
+        return false;
+      }
+    } else if (!columnId.equals(other.columnId)) {
+      return false;
+    }
+    return true;
+  }
+
+  @Override public String toString() {
+    return "ColumnIdentifier [columnId=" + columnId + "]";
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/BTreeBuilderInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/BTreeBuilderInfo.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/BTreeBuilderInfo.java
new file mode 100644
index 0000000..9c090da
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/BTreeBuilderInfo.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore;
+
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
+
+/**
+ * below class holds the metadata required to build the blocks
+ */
+public class BTreeBuilderInfo {
+
+  /**
+   * holds all the information about the
+   * data file footers
+   */
+  private List<DataFileFooter> footerList;
+
+  /**
+   * size of each dimension column value;
+   * this will be useful for reading
+   */
+  private int[] dimensionColumnValueSize;
+
+  public BTreeBuilderInfo(List<DataFileFooter> footerList,
+      int[] dimensionColumnValueSize) {
+    this.dimensionColumnValueSize = dimensionColumnValueSize;
+    this.footerList = footerList;
+  }
+
+  /**
+   * @return the eachDimensionBlockSize
+   */
+  public int[] getDimensionColumnValueSize() {
+    return dimensionColumnValueSize;
+  }
+
+  /**
+   * @return the footerList
+   */
+  public List<DataFileFooter> getFooterList() {
+    return footerList;
+  }
+}


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/LoadStatistics.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/LoadStatistics.java b/core/src/main/java/org/carbondata/core/util/LoadStatistics.java
deleted file mode 100644
index e5f24e6..0000000
--- a/core/src/main/java/org/carbondata/core/util/LoadStatistics.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.util;
-
-public interface LoadStatistics {
-  //Init PartitonInfo
-  void  initPartitonInfo(String PartitionId);
-
-  //Record the time
-  void recordDicShuffleAndWriteTime();
-
-  void recordLoadCsvfilesToDfTime();
-
-  void recordDictionaryValuesTotalTime(String partitionID,
-      Long dictionaryValuesTotalTimeTimePoint);
-
-  void recordCsvInputStepTime(String partitionID,
-      Long csvInputStepTimePoint);
-
-  void recordLruCacheLoadTime(double lruCacheLoadTime);
-
-  void recordGeneratingDictionaryValuesTime(String partitionID,
-      Long generatingDictionaryValuesTimePoint);
-
-  void recordSortRowsStepTotalTime(String partitionID,
-      Long sortRowsStepTotalTimePoint);
-
-  void recordMdkGenerateTotalTime(String partitionID,
-      Long mdkGenerateTotalTimePoint);
-
-  void recordDictionaryValue2MdkAdd2FileTime(String partitionID,
-      Long dictionaryValue2MdkAdd2FileTimePoint);
-
-  //Record the node blocks information map
-  void recordHostBlockMap(String host, Integer numBlocks);
-
-  //Record the partition blocks information map
-  void recordPartitionBlockMap(String partitionID, Integer numBlocks);
-
-  //Record total num of records processed
-  void recordTotalRecords(long totalRecords);
-
-  //Print the statistics information
-  void printStatisticsInfo(String partitionID);
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/util/ValueCompressionUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/ValueCompressionUtil.java b/core/src/main/java/org/carbondata/core/util/ValueCompressionUtil.java
deleted file mode 100644
index ac2281d..0000000
--- a/core/src/main/java/org/carbondata/core/util/ValueCompressionUtil.java
+++ /dev/null
@@ -1,1027 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.util;
-
-import java.nio.ByteBuffer;
-import java.util.Arrays;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.MeasureMetaDataModel;
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressByteArray;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressDefaultLong;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressMaxMinByte;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressMaxMinDefault;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressMaxMinFloat;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressMaxMinInt;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressMaxMinLong;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressMaxMinShort;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalByte;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalDefault;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalFloat;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalInt;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalLong;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalMaxMinByte;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalMaxMinDefault;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalMaxMinFloat;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalMaxMinInt;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalMaxMinLong;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalMaxMinShort;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalShort;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNoneByte;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNoneDefault;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNoneFloat;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNoneInt;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNoneLong;
-import org.carbondata.core.datastorage.store.compression.type.UnCompressNoneShort;
-
-public final class ValueCompressionUtil {
-
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(ValueCompressionUtil.class.getName());
-
-  private ValueCompressionUtil() {
-
-  }
-
-  /**
-   * decide actual type of value
-   *
-   * @param value   :the measure value
-   * @param decimal :
-   * @return: actual type of value
-   * @see
-   */
-  private static DataType getDataType(double value, int decimal, byte dataTypeSelected) {
-    DataType dataType = DataType.DATA_DOUBLE;
-    if (decimal == 0) {
-      if (value < Byte.MAX_VALUE) {
-        dataType = DataType.DATA_BYTE;
-      } else if (value < Short.MAX_VALUE) {
-        dataType = DataType.DATA_SHORT;
-      } else if (value < Integer.MAX_VALUE) {
-        dataType = DataType.DATA_INT;
-      } else if (value < Long.MAX_VALUE) {
-        dataType = DataType.DATA_LONG;
-      }
-    } else {
-      if (dataTypeSelected == 1) {
-        if (value < Float.MAX_VALUE) {
-          float floatValue = (float) value;
-          if (floatValue - value != 0) {
-            dataType = DataType.DATA_DOUBLE;
-
-          } else {
-            dataType = DataType.DATA_FLOAT;
-          }
-        } else if (value < Double.MAX_VALUE) {
-          dataType = DataType.DATA_DOUBLE;
-        }
-      }
-    }
-    return dataType;
-  }
-
-  /**
-   * Gives the size of datatype
-   *
-   * @param dataType : measure value type
-   * @return: the size of DataType
-   * @see
-   */
-  public static int getSize(DataType dataType) {
-
-    switch (dataType) {
-      case DATA_BYTE:
-        return 1;
-      case DATA_SHORT:
-        return 2;
-      case DATA_INT:
-      case DATA_FLOAT:
-        return 4;
-      default:
-        return 8;
-    }
-  }
-
-  /**
-   * get the best compression type. priority list,from high to low:
-   * COMPRESSION_TYPE.NONE COMPRESSION_TYPE.MAX_MIN
-   * COMPRESSION_TYPE.NON_DECIMAL_CONVERT COMPRESSION_TYPE.MAX_MIN_NDC
-   *
-   * @param maxValue : max value of one measure
-   * @param minValue : min value of one measure
-   * @param decimal  : decimal num of one measure
-   * @return : the best compression type
-   * @see
-   */
-  private static CompressionFinder getCompressionType(Object maxValue, Object minValue, int decimal,
-      char aggregatorType, byte dataTypeSelected) {
-    // 'c' for aggregate table, 'b' for BigDecimal, 'l' for long, 'n' for double
-    switch (aggregatorType) {
-      case 'c':
-        return new CompressionFinder(COMPRESSION_TYPE.CUSTOM, DataType.DATA_BYTE,
-            DataType.DATA_BYTE);
-      case 'b':
-        return new CompressionFinder(COMPRESSION_TYPE.CUSTOM_BIGDECIMAL, DataType.DATA_BYTE,
-            DataType.DATA_BYTE);
-      case 'l':
-        return new CompressionFinder(COMPRESSION_TYPE.NONE,
-                DataType.DATA_BIGINT, DataType.DATA_BIGINT);
-      default:
-        break;
-    }
-    // None Decimal
-    if (decimal == 0) {
-      if (getSize(getDataType((double) maxValue, decimal, dataTypeSelected)) > getSize(
-          getDataType((double) maxValue - (double) minValue, decimal, dataTypeSelected))) {
-        return new CompressionFinder(COMPRESSION_TYPE.MAX_MIN, DataType.DATA_DOUBLE,
-            getDataType((double) maxValue - (double) minValue, decimal, dataTypeSelected));
-      } else if (getSize(getDataType((double) maxValue, decimal, dataTypeSelected)) < getSize(
-              getDataType((double) maxValue - (double) minValue, decimal, dataTypeSelected))) {
-        return new CompressionFinder(COMPRESSION_TYPE.NONE, DataType.DATA_DOUBLE,
-                getDataType((double) maxValue - (double) minValue, decimal, dataTypeSelected));
-      } else {
-        return new CompressionFinder(COMPRESSION_TYPE.NONE, DataType.DATA_DOUBLE,
-            getDataType((double) maxValue, decimal, dataTypeSelected));
-      }
-    }
-    // decimal
-    else {
-      DataType actualDataType = getDataType((double) maxValue, decimal, dataTypeSelected);
-      DataType diffDataType =
-          getDataType((double) maxValue - (double) minValue, decimal, dataTypeSelected);
-      DataType maxNonDecDataType =
-          getDataType(Math.pow(10, decimal) * (double) maxValue, 0, dataTypeSelected);
-      DataType diffNonDecDataType =
-          getDataType(Math.pow(10, decimal) * ((double) maxValue - (double) minValue), 0,
-              dataTypeSelected);
-
-      CompressionFinder[] finders = new CompressionFinder[] {
-          new CompressionFinder(actualDataType, actualDataType,
-              CompressionFinder.PRIORITY.ACTUAL, COMPRESSION_TYPE.NONE),
-          new CompressionFinder(actualDataType, diffDataType,
-              CompressionFinder.PRIORITY.DIFFSIZE, COMPRESSION_TYPE.MAX_MIN),
-          new CompressionFinder(actualDataType, maxNonDecDataType,
-              CompressionFinder.PRIORITY.MAXNONDECIMAL, COMPRESSION_TYPE.NON_DECIMAL_CONVERT),
-          new CompressionFinder(actualDataType, diffNonDecDataType,
-              CompressionFinder.PRIORITY.DIFFNONDECIMAL, COMPRESSION_TYPE.MAX_MIN_NDC) };
-      // sort the compressionFinder.The top have the highest priority
-      Arrays.sort(finders);
-      CompressionFinder compression = finders[0];
-      return compression;
-    }
-  }
-
-  /**
-   * @param compType        : compression type
-   * @param values          : the data of one measure
-   * @param changedDataType : changed data type
-   * @param maxValue        : the max value of one measure
-   * @param decimal         : the decimal length of one measure
-   * @return: the compress data array
-   * @see
-   */
-  public static Object getCompressedValues(COMPRESSION_TYPE compType, double[] values,
-      DataType changedDataType, double maxValue, int decimal) {
-    Object o;
-    switch (compType) {
-      case NONE:
-
-        o = compressNone(changedDataType, values);
-        return o;
-
-      case MAX_MIN:
-
-        o = compressMaxMin(changedDataType, values, maxValue);
-        return o;
-
-      case NON_DECIMAL_CONVERT:
-
-        o = compressNonDecimal(changedDataType, values, decimal);
-        return o;
-
-      default:
-        o = compressNonDecimalMaxMin(changedDataType, values, decimal, maxValue);
-        return o;
-    }
-  }
-
-  public static Object getCompressedValues(COMPRESSION_TYPE compType, long[] values,
-      DataType changedDataType, long maxValue, int decimal) {
-    Object o;
-    switch (compType) {
-      case NONE:
-      default:
-        return values;
-    }
-  }
-
-  private static ValueCompressonHolder.UnCompressValue[] getUncompressedValues(
-      COMPRESSION_TYPE[] compType, DataType[] actualDataType, DataType[] changedDataType) {
-
-    ValueCompressonHolder.UnCompressValue[] compressValue =
-        new ValueCompressonHolder.UnCompressValue[changedDataType.length];
-    for (int i = 0; i < changedDataType.length; i++) {
-      switch (compType[i]) {
-        case NONE:
-
-          compressValue[i] = unCompressNone(changedDataType[i], actualDataType[i]);
-          break;
-
-        case MAX_MIN:
-
-          compressValue[i] = unCompressMaxMin(changedDataType[i], actualDataType[i]);
-          break;
-
-        case NON_DECIMAL_CONVERT:
-
-          compressValue[i] = unCompressNonDecimal(changedDataType[i], DataType.DATA_DOUBLE);
-          break;
-
-        case CUSTOM:
-          compressValue[i] = new UnCompressByteArray(UnCompressByteArray.ByteArrayType.BYTE_ARRAY);
-          break;
-
-        case CUSTOM_BIGDECIMAL:
-          compressValue[i] = new UnCompressByteArray(UnCompressByteArray.ByteArrayType.BIG_DECIMAL);
-          break;
-
-        default:
-          compressValue[i] = unCompressNonDecimalMaxMin(changedDataType[i], null);
-      }
-    }
-    return compressValue;
-
-  }
-
-  /**
-   * compress data to other type for example: double -> int
-   */
-  private static Object compressNone(DataType changedDataType, double[] value) {
-    int i = 0;
-    switch (changedDataType) {
-
-      case DATA_BYTE:
-
-        byte[] result = new byte[value.length];
-
-        for (double a : value) {
-          result[i] = (byte) a;
-          i++;
-        }
-        return result;
-
-      case DATA_SHORT:
-
-        short[] shortResult = new short[value.length];
-
-        for (double a : value) {
-          shortResult[i] = (short) a;
-          i++;
-        }
-        return shortResult;
-
-      case DATA_INT:
-
-        int[] intResult = new int[value.length];
-
-        for (double a : value) {
-          intResult[i] = (int) a;
-          i++;
-        }
-        return intResult;
-
-      case DATA_LONG:
-      case DATA_BIGINT:
-
-        long[] longResult = new long[value.length];
-
-        for (double a : value) {
-          longResult[i] = (long) a;
-          i++;
-        }
-        return longResult;
-
-      case DATA_FLOAT:
-
-        float[] floatResult = new float[value.length];
-
-        for (double a : value) {
-          floatResult[i] = (float) a;
-          i++;
-        }
-        return floatResult;
-
-      default:
-
-        return value;
-
-    }
-  }
-
-  /**
-   * compress data to other type through sub value for example: 1. subValue =
-   * maxValue - value 2. subValue: double->int
-   */
-  private static Object compressMaxMin(DataType changedDataType, double[] value, double maxValue) {
-    int i = 0;
-    switch (changedDataType) {
-      case DATA_BYTE:
-
-        byte[] result = new byte[value.length];
-        for (double a : value) {
-          result[i] = (byte) (maxValue - a);
-          i++;
-        }
-        return result;
-
-      case DATA_SHORT:
-
-        short[] shortResult = new short[value.length];
-
-        for (double a : value) {
-          shortResult[i] = (short) (maxValue - a);
-          i++;
-        }
-        return shortResult;
-
-      case DATA_INT:
-
-        int[] intResult = new int[value.length];
-
-        for (double a : value) {
-          intResult[i] = (int) (maxValue - a);
-          i++;
-        }
-        return intResult;
-
-      case DATA_LONG:
-
-        long[] longResult = new long[value.length];
-
-        for (double a : value) {
-          longResult[i] = (long) (maxValue - a);
-          i++;
-        }
-        return longResult;
-
-      case DATA_FLOAT:
-
-        float[] floatResult = new float[value.length];
-
-        for (double a : value) {
-          floatResult[i] = (float) (maxValue - a);
-          i++;
-        }
-        return floatResult;
-
-      default:
-
-        double[] defaultResult = new double[value.length];
-
-        for (double a : value) {
-          defaultResult[i] = (double) (maxValue - a);
-          i++;
-        }
-        return defaultResult;
-
-    }
-  }
-
-  /**
-   * compress data to other type through sub value for example: 1. subValue =
-   * value * Math.pow(10, decimal) 2. subValue: double->int
-   */
-  private static Object compressNonDecimal(DataType changedDataType, double[] value, int decimal) {
-    int i = 0;
-    switch (changedDataType) {
-      case DATA_BYTE:
-        byte[] result = new byte[value.length];
-
-        for (double a : value) {
-          result[i] = (byte) (Math.round(Math.pow(10, decimal) * a));
-          i++;
-        }
-        return result;
-      case DATA_SHORT:
-        short[] shortResult = new short[value.length];
-
-        for (double a : value) {
-          shortResult[i] = (short) (Math.round(Math.pow(10, decimal) * a));
-          i++;
-        }
-        return shortResult;
-      case DATA_INT:
-
-        int[] intResult = new int[value.length];
-
-        for (double a : value) {
-          intResult[i] = (int) (Math.round(Math.pow(10, decimal) * a));
-          i++;
-        }
-        return intResult;
-
-      case DATA_LONG:
-
-        long[] longResult = new long[value.length];
-
-        for (double a : value) {
-          longResult[i] = (long) (Math.round(Math.pow(10, decimal) * a));
-          i++;
-        }
-        return longResult;
-
-      case DATA_FLOAT:
-
-        float[] floatResult = new float[value.length];
-
-        for (double a : value) {
-          floatResult[i] = (float) (Math.round(Math.pow(10, decimal) * a));
-          i++;
-        }
-        return floatResult;
-
-      default:
-        double[] defaultResult = new double[value.length];
-
-        for (double a : value) {
-          defaultResult[i] = (double) (Math.round(Math.pow(10, decimal) * a));
-          i++;
-        }
-        return defaultResult;
-    }
-  }
-
-  /**
-   * compress data to other type through sub value for example: 1. subValue =
-   * maxValue - value 2. subValue = subValue * Math.pow(10, decimal) 3.
-   * subValue: double->int
-   */
-  private static Object compressNonDecimalMaxMin(DataType changedDataType, double[] value,
-      int decimal, double maxValue) {
-    int i = 0;
-    switch (changedDataType) {
-      case DATA_BYTE:
-
-        byte[] result = new byte[value.length];
-
-        for (double a : value) {
-          result[i] = (byte) (Math.round((maxValue - a) * Math.pow(10, decimal)));
-          i++;
-        }
-        return result;
-
-      case DATA_SHORT:
-
-        short[] shortResult = new short[value.length];
-
-        for (double a : value) {
-          shortResult[i] = (short) (Math.round((maxValue - a) * Math.pow(10, decimal)));
-          i++;
-        }
-        return shortResult;
-
-      case DATA_INT:
-
-        int[] intResult = new int[value.length];
-
-        for (double a : value) {
-          intResult[i] = (int) (Math.round((maxValue - a) * Math.pow(10, decimal)));
-          i++;
-        }
-        return intResult;
-
-      case DATA_LONG:
-
-        long[] longResult = new long[value.length];
-
-        for (double a : value) {
-          longResult[i] = (long) (Math.round((maxValue - a) * Math.pow(10, decimal)));
-          i++;
-        }
-        return longResult;
-
-      case DATA_FLOAT:
-
-        float[] floatResult = new float[value.length];
-
-        for (double a : value) {
-          floatResult[i] = (float) (Math.round((maxValue - a) * Math.pow(10, decimal)));
-          i++;
-        }
-        return floatResult;
-
-      default:
-
-        double[] defaultResult = new double[value.length];
-
-        for (double a : value) {
-          defaultResult[i] = (double) (Math.round((maxValue - a) * Math.pow(10, decimal)));
-          i++;
-        }
-        return defaultResult;
-
-    }
-  }
-
-  /**
-   * uncompress data for example: int -> double
-   */
-  public static ValueCompressonHolder.UnCompressValue unCompressNone(DataType compDataType,
-      DataType actualDataType) {
-    if (actualDataType == DataType.DATA_BIGINT) {
-      return new UnCompressDefaultLong();
-    } else {
-      switch (compDataType) {
-        case DATA_BYTE:
-
-          return new UnCompressNoneByte();
-
-        case DATA_SHORT:
-
-          return new UnCompressNoneShort();
-
-        case DATA_INT:
-
-          return new UnCompressNoneInt();
-
-        case DATA_LONG:
-
-          return new UnCompressNoneLong();
-
-        case DATA_FLOAT:
-
-          return new UnCompressNoneFloat();
-
-        default:
-
-          return new UnCompressNoneDefault();
-      }
-    }
-  }
-
-  /**
-   * uncompress data 1. value = maxValue - subValue 2. value: int->double
-   */
-  public static ValueCompressonHolder.UnCompressValue unCompressMaxMin(DataType compDataType,
-      DataType actualDataType) {
-    switch (compDataType) {
-      case DATA_BYTE:
-
-        return new UnCompressMaxMinByte();
-
-      case DATA_SHORT:
-
-        return new UnCompressMaxMinShort();
-
-      case DATA_INT:
-
-        return new UnCompressMaxMinInt();
-
-      case DATA_LONG:
-
-        return new UnCompressMaxMinLong();
-
-      case DATA_FLOAT:
-
-        return new UnCompressMaxMinFloat();
-
-      default:
-
-        return new UnCompressMaxMinDefault();
-
-    }
-  }
-
-  /**
-   * uncompress data value = value/Math.pow(10, decimal)
-   */
-  public static ValueCompressonHolder.UnCompressValue unCompressNonDecimal(DataType compDataType,
-      DataType actualDataType) {
-    switch (compDataType) {
-      case DATA_BYTE:
-
-        return new UnCompressNonDecimalByte();
-
-      case DATA_SHORT:
-
-        return new UnCompressNonDecimalShort();
-
-      case DATA_INT:
-
-        return new UnCompressNonDecimalInt();
-
-      case DATA_LONG:
-
-        return new UnCompressNonDecimalLong();
-
-      case DATA_FLOAT:
-
-        return new UnCompressNonDecimalFloat();
-
-      default:
-
-        return new UnCompressNonDecimalDefault();
-
-    }
-  }
-
-  /**
-   * uncompress data value = (maxValue - subValue)/Math.pow(10, decimal)
-   */
-  public static ValueCompressonHolder.UnCompressValue unCompressNonDecimalMaxMin(
-      DataType compDataType, DataType actualDataType) {
-    switch (compDataType) {
-      case DATA_BYTE:
-
-        return new UnCompressNonDecimalMaxMinByte();
-
-      case DATA_SHORT:
-
-        return new UnCompressNonDecimalMaxMinShort();
-
-      case DATA_INT:
-
-        return new UnCompressNonDecimalMaxMinInt();
-
-      case DATA_LONG:
-
-        return new UnCompressNonDecimalMaxMinLong();
-
-      case DATA_FLOAT:
-
-        return new UnCompressNonDecimalMaxMinFloat();
-
-      default:
-
-        return new UnCompressNonDecimalMaxMinDefault();
-
-    }
-  }
-
-  /**
-   * Create Value compression model
-   *
-   * @param maxValue
-   * @param minValue
-   * @param decimalLength
-   * @param uniqueValue
-   * @param aggType
-   * @param dataTypeSelected
-   * @return
-   */
-  public static ValueCompressionModel getValueCompressionModel(Object[] maxValue, Object[] minValue,
-      int[] decimalLength, Object[] uniqueValue, char[] aggType, byte[] dataTypeSelected) {
-
-    MeasureMetaDataModel metaDataModel =
-        new MeasureMetaDataModel(minValue, maxValue, decimalLength, maxValue.length, uniqueValue,
-            aggType, dataTypeSelected);
-    return getValueCompressionModel(metaDataModel);
-  }
-
-  public static ValueCompressionModel getValueCompressionModel(MeasureMetaDataModel measureMDMdl) {
-    int measureCount = measureMDMdl.getMeasureCount();
-    Object[] minValue = measureMDMdl.getMinValue();
-    Object[] maxValue = measureMDMdl.getMaxValue();
-    Object[] uniqueValue = measureMDMdl.getUniqueValue();
-    int[] decimal = measureMDMdl.getDecimal();
-    char[] type = measureMDMdl.getType();
-    byte[] dataTypeSelected = measureMDMdl.getDataTypeSelected();
-    ValueCompressionModel compressionModel = new ValueCompressionModel();
-    DataType[] actualType = new DataType[measureCount];
-    DataType[] changedType = new DataType[measureCount];
-    COMPRESSION_TYPE[] compType = new COMPRESSION_TYPE[measureCount];
-    for (int i = 0; i < measureCount; i++) {
-      CompressionFinder compresssionFinder = ValueCompressionUtil
-          .getCompressionType(maxValue[i], minValue[i], decimal[i], type[i], dataTypeSelected[i]);
-      actualType[i] = compresssionFinder.actualDataType;
-      changedType[i] = compresssionFinder.changedDataType;
-      compType[i] = compresssionFinder.compType;
-    }
-    compressionModel.setMaxValue(maxValue);
-    compressionModel.setDecimal(decimal);
-    compressionModel.setChangedDataType(changedType);
-    compressionModel.setCompType(compType);
-    compressionModel.setActualDataType(actualType);
-    compressionModel.setMinValue(minValue);
-    compressionModel.setUniqueValue(uniqueValue);
-    compressionModel.setType(type);
-    compressionModel.setMinValueFactForAgg(measureMDMdl.getMinValueFactForAgg());
-    compressionModel.setDataTypeSelected(dataTypeSelected);
-    ValueCompressonHolder.UnCompressValue[] values = ValueCompressionUtil
-        .getUncompressedValues(compressionModel.getCompType(), compressionModel.getActualDataType(),
-            compressionModel.getChangedDataType());
-    compressionModel.setUnCompressValues(values);
-    return compressionModel;
-  }
-
-  public static byte[] convertToBytes(short[] values) {
-    ByteBuffer buffer = ByteBuffer.allocate(values.length * 2);
-    for (short val : values) {
-      buffer.putShort(val);
-    }
-    return buffer.array();
-  }
-
-  public static byte[] convertToBytes(int[] values) {
-    ByteBuffer buffer = ByteBuffer.allocate(values.length * 4);
-    for (int val : values) {
-      buffer.putInt(val);
-    }
-    return buffer.array();
-  }
-
-  public static byte[] convertToBytes(float[] values) {
-    ByteBuffer buffer = ByteBuffer.allocate(values.length * 4);
-    for (float val : values) {
-      buffer.putFloat(val);
-    }
-    return buffer.array();
-  }
-
-  public static byte[] convertToBytes(long[] values) {
-    ByteBuffer buffer = ByteBuffer.allocate(values.length * 8);
-    for (long val : values) {
-      buffer.putLong(val);
-    }
-    return buffer.array();
-  }
-
-  public static byte[] convertToBytes(double[] values) {
-    ByteBuffer buffer = ByteBuffer.allocate(values.length * 8);
-    for (double val : values) {
-      buffer.putDouble(val);
-    }
-    return buffer.array();
-  }
-
-  public static short[] convertToShortArray(ByteBuffer buffer, int length) {
-    buffer.rewind();
-    short[] values = new short[length / 2];
-
-    for (int i = 0; i < values.length; i++) {
-      values[i] = buffer.getShort();
-    }
-    return values;
-  }
-
-  public static int[] convertToIntArray(ByteBuffer buffer, int length) {
-    buffer.rewind();
-    int[] values = new int[length / 4];
-
-    for (int i = 0; i < values.length; i++) {
-      values[i] = buffer.getInt();
-    }
-    return values;
-  }
-
-  public static float[] convertToFloatArray(ByteBuffer buffer, int length) {
-    buffer.rewind();
-    float[] values = new float[length / 4];
-
-    for (int i = 0; i < values.length; i++) {
-      values[i] = buffer.getFloat();
-    }
-    return values;
-  }
-
-  public static long[] convertToLongArray(ByteBuffer buffer, int length) {
-    buffer.rewind();
-    long[] values = new long[length / 8];
-    for (int i = 0; i < values.length; i++) {
-      values[i] = buffer.getLong();
-    }
-    return values;
-  }
-
-  public static double[] convertToDoubleArray(ByteBuffer buffer, int length) {
-    buffer.rewind();
-    double[] values = new double[length / 8];
-    for (int i = 0; i < values.length; i++) {
-      values[i] = buffer.getDouble();
-    }
-    return values;
-  }
-
-  /**
-   * use to identify compression type.
-   */
-  public static enum COMPRESSION_TYPE {
-    NONE,
-    MAX_MIN,
-    NON_DECIMAL_CONVERT,
-    MAX_MIN_NDC,
-
-    /**
-     * custom
-     */
-    CUSTOM,
-
-    CUSTOM_BIGDECIMAL
-  }
-
-  /**
-   * use to identify the type of data.
-   */
-  public static enum DataType {
-    DATA_BYTE(),
-    DATA_SHORT(),
-    DATA_INT(),
-    DATA_FLOAT(),
-    DATA_LONG(),
-    DATA_BIGINT(),
-    DATA_DOUBLE();
-
-    /**
-     * DataType.
-     */
-    private DataType() {
-      //this.size = size;
-    }
-
-  }
-
-  /**
-   * through the size of data type,priority and compression type, select the
-   * best compression type
-   */
-  private static class CompressionFinder implements Comparable<CompressionFinder> {
-    /**
-     * compType.
-     */
-    private COMPRESSION_TYPE compType;
-    /**
-     * actualDataType.
-     */
-    private DataType actualDataType;
-    /**
-     * changedDataType.
-     */
-    private DataType changedDataType;
-    /**
-     * the size of changed data
-     */
-    private int size;
-    /**
-     * priority.
-     */
-    private PRIORITY priority;
-
-    /**
-     * CompressionFinder constructor.
-     *
-     * @param compType
-     * @param actualDataType
-     * @param changedDataType
-     */
-    CompressionFinder(COMPRESSION_TYPE compType, DataType actualDataType,
-        DataType changedDataType) {
-      super();
-      this.compType = compType;
-      this.actualDataType = actualDataType;
-      this.changedDataType = changedDataType;
-    }
-
-    /**
-     * CompressionFinder overloaded constructor.
-     *
-     * @param actualDataType
-     * @param changedDataType
-     * @param priority
-     * @param compType
-     */
-
-    CompressionFinder(DataType actualDataType, DataType changedDataType, PRIORITY priority,
-        COMPRESSION_TYPE compType) {
-      super();
-      this.actualDataType = actualDataType;
-      this.changedDataType = changedDataType;
-      this.size = getSize(changedDataType);
-      this.priority = priority;
-      this.compType = compType;
-    }
-
-    @Override public boolean equals(Object obj) {
-      boolean equals = false;
-      if (obj instanceof CompressionFinder) {
-        CompressionFinder cf = (CompressionFinder) obj;
-
-        if (this.size == cf.size && this.priority == cf.priority) {
-          equals = true;
-        }
-
-      }
-      return equals;
-    }
-
-    @Override public int hashCode() {
-      final int code = 31;
-      int result = 1;
-
-      result = code * result + this.size;
-      result = code * result + ((priority == null) ? 0 : priority.hashCode());
-      return result;
-    }
-
-    @Override public int compareTo(CompressionFinder o) {
-      int returnVal = 0;
-      // the bigger size has the higher priority
-      if (this.equals(o)) {
-        returnVal = 0;
-      } else if (this.size == o.size) {
-        // the compression type priority
-        if (priority.priority > o.priority.priority) {
-          returnVal = 1;
-        } else if (priority.priority < o.priority.priority) {
-          returnVal = -1;
-        }
-
-      } else if (this.size > o.size) {
-        returnVal = 1;
-      } else {
-        returnVal = -1;
-      }
-      return returnVal;
-    }
-
-    /**
-     * Compression type priority.
-     * ACTUAL is the highest priority and DIFFNONDECIMAL is the lowest
-     * priority
-     */
-    static enum PRIORITY {
-      ACTUAL(0),
-      DIFFSIZE(1),
-      MAXNONDECIMAL(2),
-      DIFFNONDECIMAL(3);
-
-      /**
-       * priority.
-       */
-      private int priority;
-
-      private PRIORITY(int priority) {
-        this.priority = priority;
-      }
-    }
-  }
-
-}
-
-
-

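For reference, the convertToBytes and convertTo*Array helpers deleted above are exact
inverses of each other. A minimal standalone sketch of the round trip (not part of the
patch; it assumes only the JDK's default big-endian ByteBuffer order, which the deleted
code also relies on):

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    public class RoundTripSketch {
      public static void main(String[] args) {
        int[] values = {1, 2, 3};
        // encode: 4 bytes per int, as in convertToBytes(int[])
        ByteBuffer out = ByteBuffer.allocate(values.length * 4);
        for (int val : values) {
          out.putInt(val);
        }
        byte[] bytes = out.array();
        // decode: as in convertToIntArray(ByteBuffer, int)
        ByteBuffer in = ByteBuffer.wrap(bytes);
        in.rewind();
        int[] decoded = new int[bytes.length / 4];
        for (int i = 0; i < decoded.length; i++) {
          decoded[i] = in.getInt();
        }
        System.out.println(Arrays.toString(decoded)); // prints [1, 2, 3]
      }
    }
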
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/writer/ByteArrayHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/writer/ByteArrayHolder.java b/core/src/main/java/org/carbondata/core/writer/ByteArrayHolder.java
deleted file mode 100644
index 599fe09..0000000
--- a/core/src/main/java/org/carbondata/core/writer/ByteArrayHolder.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.writer;
-
-import java.util.Arrays;
-
-import org.carbondata.core.util.ByteUtil;
-
-public class ByteArrayHolder implements Comparable<ByteArrayHolder> {
-
-  /**
-   * mdkey
-   */
-  private byte[] mdKey;
-
-  /**
-   * primary key
-   */
-  private int primaryKey;
-
-  /**
-   * @param mdKey
-   * @param primaryKey
-   */
-  public ByteArrayHolder(byte[] mdKey, int primaryKey) {
-    this.mdKey = mdKey;
-    this.primaryKey = primaryKey;
-  }
-
-  @Override public int compareTo(ByteArrayHolder o) {
-    return ByteUtil.compare(mdKey, o.mdKey);
-  }
-
-  @Override public boolean equals(Object obj) {
-    if (obj instanceof ByteArrayHolder) {
-      if (0 == ByteUtil.compare(mdKey, ((ByteArrayHolder) obj).mdKey)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  @Override public int hashCode() {
-    int prime = 31;
-    int result = prime * Arrays.hashCode(mdKey);
-    result = result + prime * primaryKey;
-    return result;
-  }
-
-  public byte[] getMdKey() {
-    return mdKey;
-  }
-
-  public int getPrimaryKey() {
-    return primaryKey;
-  }
-
-}
-

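A short usage sketch for ByteArrayHolder (illustrative only; it assumes ByteUtil.compare
orders byte arrays lexicographically, which is what compareTo delegates to). Sorting a
list of holders orders them by mdkey, as HierarchyValueWriterForCSV further below relies on:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    static int smallestPrimaryKey() {
      List<ByteArrayHolder> holders = new ArrayList<ByteArrayHolder>();
      holders.add(new ByteArrayHolder(new byte[] {2, 0}, 10));
      holders.add(new ByteArrayHolder(new byte[] {1, 9}, 20));
      Collections.sort(holders);             // sorts by mdkey via compareTo
      return holders.get(0).getPrimaryKey(); // 20, since {1, 9} sorts first
    }
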
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/writer/CarbonDictionaryWriter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/writer/CarbonDictionaryWriter.java b/core/src/main/java/org/carbondata/core/writer/CarbonDictionaryWriter.java
deleted file mode 100644
index e13c32f..0000000
--- a/core/src/main/java/org/carbondata/core/writer/CarbonDictionaryWriter.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.writer;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * dictionary writer interface
- */
-public interface CarbonDictionaryWriter extends Closeable {
-  /**
-   * write method that accepts one value at a time
-   * This method can be used when data is huge and memory is les. In that
-   * case data can be stored to a file and an iterator can iterate over it and
-   * pass one value at a time
-   *
-   * @param value unique dictionary value
-   * @throws IOException if an I/O error occurs
-   */
-  void write(String value) throws IOException;
-
-  /**
-   * write method that accepts one value at a time
-   * This method can be used when data is huge and memory is less. In that
-   * case data can be stored to a file and an iterator can iterate over it and
-   * pass one value at a time
-   *
-   * @param value unique dictionary value
-   * @throws IOException if an I/O error occurs
-   */
-  void write(byte[] value) throws IOException;
-
-  /**
-   * write method that accepts list of byte arrays as value
-   * This can be used when the data is small: each string value can be
-   * converted to a byte array and added to a list
-   *
-   * @param valueList list of byte array. Each byte array is unique dictionary value
-   * @throws IOException if an I/O error occurs
-   */
-  void write(List<byte[]> valueList) throws IOException;
-
-
-  void commit() throws IOException;
-}

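An illustrative call sequence for this interface (a sketch only, written against the
CarbonDictionaryWriterImpl that follows; the identifier objects are assumed to be
obtained from table metadata elsewhere):

    static void writeDictionary(String hdfsStorePath, CarbonTableIdentifier tableIdentifier,
        ColumnIdentifier columnIdentifier, List<byte[]> values) throws IOException {
      CarbonDictionaryWriter writer =
          new CarbonDictionaryWriterImpl(hdfsStorePath, tableIdentifier, columnIdentifier);
      try {
        writer.write(values); // list overload: written as a single chunk
        writer.commit();      // records chunk offsets and the surrogate key range
      } finally {
        writer.close();       // flushes the pending chunk and closes the thrift writer
      }
    }
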
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/writer/CarbonDictionaryWriterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/writer/CarbonDictionaryWriterImpl.java b/core/src/main/java/org/carbondata/core/writer/CarbonDictionaryWriterImpl.java
deleted file mode 100644
index 2508c86..0000000
--- a/core/src/main/java/org/carbondata/core/writer/CarbonDictionaryWriterImpl.java
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.writer;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.common.factory.CarbonCommonFactory;
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.carbon.path.CarbonTablePath;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.filesystem.CarbonFile;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-import org.carbondata.core.reader.CarbonDictionaryColumnMetaChunk;
-import org.carbondata.core.reader.CarbonDictionaryMetadataReader;
-import org.carbondata.core.reader.CarbonDictionaryMetadataReaderImpl;
-import org.carbondata.core.service.PathService;
-import org.carbondata.core.util.CarbonProperties;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.format.ColumnDictionaryChunk;
-import org.carbondata.format.ColumnDictionaryChunkMeta;
-
-import org.apache.thrift.TBase;
-
-/**
- * This class is responsible for writing the dictionary file and its metadata
- */
-public class CarbonDictionaryWriterImpl implements CarbonDictionaryWriter {
-
-  /**
-   * LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(CarbonDictionaryWriterImpl.class.getName());
-
-  /**
-   * carbon type identifier
-   */
-  protected CarbonTableIdentifier carbonTableIdentifier;
-
-  /**
-   * list which will hold values up to a maximum of one dictionary chunk size
-   */
-  private List<ByteBuffer> oneDictionaryChunkList;
-
-  /**
-   * Meta object which will hold last segment entry details
-   */
-  private CarbonDictionaryColumnMetaChunk chunkMetaObjectForLastSegmentEntry;
-
-  /**
-   * dictionary file and meta thrift writer
-   */
-  private ThriftWriter dictionaryThriftWriter;
-
-  /**
-   * column identifier
-   */
-  protected ColumnIdentifier columnIdentifier;
-
-  /**
-   * HDFS store path
-   */
-  protected String hdfsStorePath;
-
-  /**
-   * dictionary file path
-   */
-  protected String dictionaryFilePath;
-
-  /**
-   * dictionary metadata file path
-   */
-  protected String dictionaryMetaFilePath;
-
-  /**
-   * start offset of dictionary chunk for a segment
-   */
-  private long chunk_start_offset;
-
-  /**
-   * end offset of a dictionary chunk for a segment
-   */
-  private long chunk_end_offset;
-
-  /**
-   * total dictionary value record count for one segment
-   */
-  private int totalRecordCount;
-
-  /**
-   * total thrift object chunk count written for one segment
-   */
-  private int chunk_count;
-
-  /**
-   * chunk size for a dictionary file after which data will be written to disk
-   */
-  private int dictionary_one_chunk_size;
-
-  /**
-   * flag to check whether write method is called for first time
-   */
-  private boolean isFirstTime;
-
-  private static final Charset defaultCharset = Charset.forName(
-      CarbonCommonConstants.DEFAULT_CHARSET);
-
-  /**
-   * Constructor
-   *
-   * @param hdfsStorePath         HDFS store path
-   * @param carbonTableIdentifier table identifier which will give table name and database name
-   * @param columnIdentifier      column unique identifier
-   */
-  public CarbonDictionaryWriterImpl(String hdfsStorePath,
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier) {
-    this.carbonTableIdentifier = carbonTableIdentifier;
-    this.columnIdentifier = columnIdentifier;
-    this.hdfsStorePath = hdfsStorePath;
-    this.isFirstTime = true;
-  }
-
-  /**
-   * This method will write the data in thrift format to disk. This method will be guided by
-   * parameter dictionary_one_chunk_size and data will be divided into chunks
-   * based on this parameter
-   *
-   * @param value unique dictionary value
-   * @throws IOException if an I/O error occurs
-   */
-  @Override public void write(String value) throws IOException {
-    write(value.getBytes(defaultCharset));
-  }
-
-  /**
-   * This method will write the data in thrift format to disk. This method will be guided by
-   * parameter dictionary_one_chunk_size and data will be divided into chunks
-   * based on this parameter
-   *
-   * @param value unique dictionary value
-   * @throws IOException if an I/O error occurs
-   */
-  @Override public void write(byte[] value) throws IOException {
-    if (isFirstTime) {
-      init();
-      isFirstTime = false;
-    }
-    // if one chunk size is equal to list size then write the data to file
-    checkAndWriteDictionaryChunkToFile();
-    oneDictionaryChunkList.add(ByteBuffer.wrap(value));
-    totalRecordCount++;
-  }
-
-  /**
-   * This method will write the data in thrift format to disk. This method will not be guided by
-   * parameter dictionary_one_chunk_size and complete data will be written as one chunk
-   *
-   * @param valueList list of byte array. Each byte array is unique dictionary value
-   * @throws IOException if an I/O error occurs
-   */
-  @Override public void write(List<byte[]> valueList) throws IOException {
-    if (isFirstTime) {
-      init();
-      isFirstTime = false;
-    }
-    for (byte[] value : valueList) {
-      oneDictionaryChunkList.add(ByteBuffer.wrap(value));
-      totalRecordCount++;
-    }
-  }
-
-  /**
-   * write dictionary metadata file and close thrift object
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override public void close() throws IOException {
-    if (null != dictionaryThriftWriter) {
-      writeDictionaryFile();
-      // close the thrift writer for dictionary file
-      closeThriftWriter();
-    }
-  }
-
-  /**
-   * check if the threshold has been reached for the number of
-   * values that can be kept in memory, and then flush the data to file
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  private void checkAndWriteDictionaryChunkToFile() throws IOException {
-    if (oneDictionaryChunkList.size() >= dictionary_one_chunk_size) {
-      writeDictionaryFile();
-      createChunkList();
-    }
-  }
-
-  /**
-   * This method will serialize the object of dictionary file
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  private void writeDictionaryFile() throws IOException {
-    ColumnDictionaryChunk columnDictionaryChunk = new ColumnDictionaryChunk();
-    columnDictionaryChunk.setValues(oneDictionaryChunkList);
-    writeThriftObject(columnDictionaryChunk);
-  }
-
-  /**
-   * This method will check and create the directory path where the dictionary file has to be created
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  private void init() throws IOException {
-    initDictionaryChunkSize();
-    initPaths();
-    boolean dictFileExists = CarbonUtil.isFileExists(this.dictionaryFilePath);
-    if (dictFileExists && CarbonUtil.isFileExists(this.dictionaryMetaFilePath)) {
-      this.chunk_start_offset = CarbonUtil.getFileSize(this.dictionaryFilePath);
-      validateDictionaryFileOffsetWithLastSegmentEntryOffset();
-    } else if (dictFileExists) {
-      FileFactory.getCarbonFile(dictionaryFilePath, FileFactory.getFileType(dictionaryFilePath))
-          .delete();
-    }
-    openThriftWriter(this.dictionaryFilePath);
-    createChunkList();
-  }
-
-  protected void initPaths() {
-    PathService pathService = CarbonCommonFactory.getPathService();
-    CarbonTablePath carbonTablePath = pathService.getCarbonTablePath(columnIdentifier,
-            this.hdfsStorePath, carbonTableIdentifier);
-    this.dictionaryFilePath = carbonTablePath.getDictionaryFilePath(columnIdentifier.getColumnId());
-    this.dictionaryMetaFilePath =
-        carbonTablePath.getDictionaryMetaFilePath(columnIdentifier.getColumnId());
-  }
-
-  /**
-   * initialize the dictionary chunk size, i.e. the number of values that can be kept in memory at a time
-   */
-  private void initDictionaryChunkSize() {
-    try {
-      dictionary_one_chunk_size = Integer.parseInt(CarbonProperties.getInstance()
-          .getProperty(CarbonCommonConstants.DICTIONARY_ONE_CHUNK_SIZE,
-              CarbonCommonConstants.DICTIONARY_ONE_CHUNK_SIZE_DEFAULT));
-    } catch (NumberFormatException e) {
-      dictionary_one_chunk_size =
-          Integer.parseInt(CarbonCommonConstants.DICTIONARY_ONE_CHUNK_SIZE_DEFAULT);
-      LOGGER.error("Dictionary chunk size not configured properly. Taking default size "
-              + dictionary_one_chunk_size);
-    }
-  }
-
-  /**
-   * initialise one dictionary chunk list and increment the chunk count
-   */
-  private void createChunkList() {
-    this.oneDictionaryChunkList = new ArrayList<ByteBuffer>(dictionary_one_chunk_size);
-    chunk_count++;
-  }
-
-  /**
-   * if the file already exists, then read the metadata file and
-   * validate the last entry's end offset against the file size. If
-   * they are not equal, some invalid data is present and needs
-   * to be truncated
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  private void validateDictionaryFileOffsetWithLastSegmentEntryOffset() throws IOException {
-    // read last dictionary chunk meta entry from dictionary metadata file
-    chunkMetaObjectForLastSegmentEntry = getChunkMetaObjectForLastSegmentEntry();
-    int bytesToTruncate =
-        (int) (chunk_start_offset - chunkMetaObjectForLastSegmentEntry.getEnd_offset());
-    if (bytesToTruncate > 0) {
-      LOGGER.info("some inconsistency in dictionary file for column " + this.columnIdentifier);
-      // truncate the dictionary data till chunk meta end offset
-      FileFactory.FileType fileType = FileFactory.getFileType(this.dictionaryFilePath);
-      CarbonFile carbonFile = FileFactory.getCarbonFile(this.dictionaryFilePath, fileType);
-      boolean truncateSuccess = carbonFile
-          .truncate(this.dictionaryFilePath, chunkMetaObjectForLastSegmentEntry.getEnd_offset());
-      if (!truncateSuccess) {
-        LOGGER.info("Diction file not truncated successfully for column " + this.columnIdentifier);
-      }
-    }
-  }
-
-  /**
-   * This method will write the dictionary metadata file for a given column
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  private void writeDictionaryMetadataFile() throws IOException {
-    // Format of dictionary metadata file
-    // min, max, start offset, end offset and chunk count
-    int min_surrogate_key = 0;
-    int max_surrogate_key = 0;
-    // case 1: first time dictionary writing
-    // previousMax = 0, totalRecordCount = 5, min = 1, max= 5
-    // case2: file already exists
-    // previousMax = 5, totalRecordCount = 10, min = 6, max = 15
-    // case 3: no unique values, total records 0
-    // previousMax = 15, totalRecordCount = 0, min = 15, max = 15
-    // both min and max equal to previous max
-    if (null != chunkMetaObjectForLastSegmentEntry) {
-      if (0 == totalRecordCount) {
-        min_surrogate_key = chunkMetaObjectForLastSegmentEntry.getMax_surrogate_key();
-      } else {
-        min_surrogate_key = chunkMetaObjectForLastSegmentEntry.getMax_surrogate_key() + 1;
-      }
-      max_surrogate_key =
-          chunkMetaObjectForLastSegmentEntry.getMax_surrogate_key() + totalRecordCount;
-    } else {
-      if (totalRecordCount > 0) {
-        min_surrogate_key = 1;
-      }
-      max_surrogate_key = totalRecordCount;
-    }
-    ColumnDictionaryChunkMeta dictionaryChunkMeta =
-        new ColumnDictionaryChunkMeta(min_surrogate_key, max_surrogate_key, chunk_start_offset,
-            chunk_end_offset, chunk_count);
-    openThriftWriter(this.dictionaryMetaFilePath);
-    // write dictionary metadata file
-    writeThriftObject(dictionaryChunkMeta);
-    closeThriftWriter();
-    LOGGER.info("Dictionary metadata file written successfully for column " + this.columnIdentifier
-            + " at path " + this.dictionaryMetaFilePath);
-  }
-
-  /**
-   * open thrift writer for writing dictionary chunk/meta object
-   *
-   * @param dictionaryFile can be dictionary file name or dictionary metadata file name
-   * @throws IOException if an I/O error occurs
-   */
-  private void openThriftWriter(String dictionaryFile) throws IOException {
-    // create thrift writer instance
-    dictionaryThriftWriter = new ThriftWriter(dictionaryFile, true);
-    // open the file stream
-    dictionaryThriftWriter.open();
-  }
-
-  /**
-   * This method will write the thrift object to a file
-   *
-   * @param dictionaryThriftObject can be dictionary thrift object or dictionary metadata
-   *                               thrift object
-   * @throws IOException if an I/O error occurs
-   */
-  private void writeThriftObject(TBase dictionaryThriftObject) throws IOException {
-    dictionaryThriftWriter.write(dictionaryThriftObject);
-  }
-
-  /**
-   * close dictionary thrift writer
-   */
-  private void closeThriftWriter() {
-    if (null != dictionaryThriftWriter) {
-      dictionaryThriftWriter.close();
-    }
-  }
-
-  /**
-   * This method will read the dictionary chunk metadata thrift object for last entry
-   *
-   * @return last entry of dictionary meta chunk
-   * @throws IOException if an I/O error occurs
-   */
-  private CarbonDictionaryColumnMetaChunk getChunkMetaObjectForLastSegmentEntry()
-      throws IOException {
-    CarbonDictionaryColumnMetaChunk carbonDictionaryColumnMetaChunk = null;
-    CarbonDictionaryMetadataReader columnMetadataReaderImpl = getDictionaryMetadataReader();
-    try {
-      // read the last segment entry for dictionary metadata
-      carbonDictionaryColumnMetaChunk =
-          columnMetadataReaderImpl.readLastEntryOfDictionaryMetaChunk();
-    } finally {
-      // Close metadata reader
-      columnMetadataReaderImpl.close();
-    }
-    return carbonDictionaryColumnMetaChunk;
-  }
-
-  /**
-   * @return
-   */
-  protected CarbonDictionaryMetadataReader getDictionaryMetadataReader() {
-    return new CarbonDictionaryMetadataReaderImpl(hdfsStorePath, carbonTableIdentifier,
-        columnIdentifier);
-  }
-
-  @Override public void commit() throws IOException {
-    if (null != dictionaryThriftWriter) {
-      this.chunk_end_offset = CarbonUtil.getFileSize(this.dictionaryFilePath);
-      writeDictionaryMetadataFile();
-    }
-  }
-}

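The min/max bookkeeping in writeDictionaryMetadataFile reduces to a small pure function;
extracted here purely to illustrate the three commented cases:

    // previousMax is the max surrogate key of the last committed segment (0 if none)
    static int[] surrogateKeyWindow(int previousMax, int totalRecordCount) {
      int min = (totalRecordCount == 0) ? previousMax : previousMax + 1;
      int max = previousMax + totalRecordCount;
      return new int[] {min, max};
    }
    // surrogateKeyWindow(0, 5)  -> [1, 5]    first-time dictionary writing
    // surrogateKeyWindow(5, 10) -> [6, 15]   file already exists
    // surrogateKeyWindow(15, 0) -> [15, 15]  no new unique values
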
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/writer/CarbonFooterWriter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/writer/CarbonFooterWriter.java b/core/src/main/java/org/carbondata/core/writer/CarbonFooterWriter.java
deleted file mode 100644
index 17b5686..0000000
--- a/core/src/main/java/org/carbondata/core/writer/CarbonFooterWriter.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.writer;
-
-import java.io.IOException;
-
-import org.carbondata.format.FileFooter;
-
-/**
- * Writes metadata block to the fact table file in thrift format org.carbondata.format.FileFooter
- */
-public class CarbonFooterWriter {
-
-  // It is version number of this format class.
-  private static int VERSION_NUMBER = 1;
-
-  // Fact file path
-  private String filePath;
-
-  public CarbonFooterWriter(String filePath) {
-    this.filePath = filePath;
-  }
-
-  /**
-   * It writes FileFooter thrift format object to file.
-   *
-   * @param footer
-   * @param currentPosition the position at which this metadata is going to be written.
-   * @throws IOException
-   */
-  public void writeFooter(FileFooter footer, long currentPosition) throws IOException {
-
-    ThriftWriter thriftWriter = openThriftWriter(filePath);
-    footer.setVersion(VERSION_NUMBER);
-    try {
-      thriftWriter.write(footer);
-      thriftWriter.writeOffset(currentPosition);
-    } finally {
-      thriftWriter.close();
-    }
-  }
-
-  /**
-   * open thrift writer for writing dictionary chunk/meta object
-   */
-  private ThriftWriter openThriftWriter(String filePath) throws IOException {
-    // create thrift writer instance
-    ThriftWriter thriftWriter = new ThriftWriter(filePath, true);
-    // open the file stream
-    thriftWriter.open();
-    return thriftWriter;
-  }
-}

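Because writeFooter appends the FileFooter and then writeOffset appends the footer's
start position as a trailing 8-byte long, a reader can locate the footer from the end of
the file. A sketch of that lookup (the actual reader lives elsewhere in the codebase):

    static long footerStartOffset(java.io.RandomAccessFile factFile) throws java.io.IOException {
      factFile.seek(factFile.length() - 8); // the trailing long is where the footer begins
      return factFile.readLong();
    }
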
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/writer/CarbonIndexFileWriter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/writer/CarbonIndexFileWriter.java b/core/src/main/java/org/carbondata/core/writer/CarbonIndexFileWriter.java
deleted file mode 100644
index 5ae7b33..0000000
--- a/core/src/main/java/org/carbondata/core/writer/CarbonIndexFileWriter.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.writer;
-
-import java.io.IOException;
-
-import org.apache.thrift.TBase;
-
-/**
- * Writer class which will be used to write the index file
- */
-public class CarbonIndexFileWriter {
-
-  /**
-   * thrift writer object
-   */
-  private ThriftWriter thriftWriter;
-
-  /**
-   * It writes the thrift object to a file
-   *
-   * @param indexObject index thrift object to be written
-   * @throws IOException
-   */
-  public void writeThrift(TBase indexObject) throws IOException {
-    thriftWriter.write(indexObject);
-  }
-
-  /**
-   * Below method will be used to open the thrift writer
-   *
-   * @param filePath file path where data need to be written
-   * @throws IOException throws io exception in case of any failure
-   */
-  public void openThriftWriter(String filePath) throws IOException {
-    // create thrift writer instance
-    thriftWriter = new ThriftWriter(filePath, true);
-    // open the file stream
-    thriftWriter.open();
-  }
-
-  /**
-   * Below method will be used to close the thrift object
-   */
-  public void close() {
-    thriftWriter.close();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/writer/HierarchyValueWriterForCSV.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/writer/HierarchyValueWriterForCSV.java b/core/src/main/java/org/carbondata/core/writer/HierarchyValueWriterForCSV.java
deleted file mode 100644
index d75ac6f..0000000
--- a/core/src/main/java/org/carbondata/core/writer/HierarchyValueWriterForCSV.java
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.writer;
-
-import java.io.Closeable;
-import java.io.File;
-import java.io.FileFilter;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.util.CarbonProperties;
-import org.carbondata.core.util.CarbonUtil;
-
-import org.pentaho.di.core.exception.KettleException;
-
-public class HierarchyValueWriterForCSV {
-
-  /**
-   * Comment for <code>LOGGER</code>
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(HierarchyValueWriterForCSV.class.getName());
-  /**
-   * hierarchyName
-   */
-  private String hierarchyName;
-
-  /**
-   * bufferedOutStream
-   */
-  private FileChannel outPutFileChannel;
-
-  /**
-   * storeFolderLocation
-   */
-  private String storeFolderLocation;
-
-  /**
-   * intialized
-   */
-  private boolean intialized;
-
-  /**
-   * counter the number of files.
-   */
-  private int counter;
-
-  /**
-   * byteArrayList
-   */
-  private List<ByteArrayHolder> byteArrayholder =
-      new ArrayList<ByteArrayHolder>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-
-  /**
-   * toflush
-   */
-  private int toflush;
-
-  public HierarchyValueWriterForCSV(String hierarchy, String storeFolderLocation) {
-    this.hierarchyName = hierarchy;
-    this.storeFolderLocation = storeFolderLocation;
-
-    CarbonProperties instance = CarbonProperties.getInstance();
-
-    this.toflush = Integer.parseInt(instance
-        .getProperty(CarbonCommonConstants.SORT_SIZE, CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL));
-
-    int rowSetSize = Integer.parseInt(instance.getProperty(CarbonCommonConstants.GRAPH_ROWSET_SIZE,
-        CarbonCommonConstants.GRAPH_ROWSET_SIZE_DEFAULT));
-
-    if (this.toflush > rowSetSize) {
-      this.toflush = rowSetSize;
-    }
-
-    updateCounter(hierarchy, storeFolderLocation);
-  }
-
-  /**
-   * @return Returns the byteArrayList.
-   */
-  public List<ByteArrayHolder> getByteArrayList() throws KettleException {
-    return byteArrayholder;
-  }
-
-  public FileChannel getBufferedOutStream() {
-    return outPutFileChannel;
-  }
-
-  private void updateCounter(final String meString, String storeFolderLocation) {
-    File storeFolder = new File(storeFolderLocation);
-
-    File[] listFiles = storeFolder.listFiles(new FileFilter() {
-
-      @Override public boolean accept(File file) {
-        return file.getName().indexOf(meString) > -1;
-      }
-    });
-
-    if (null == listFiles || listFiles.length == 0) {
-      counter = 0;
-      return;
-    }
-
-    for (File hierFile : listFiles) {
-      String hierFileName = hierFile.getName();
-
-      if (hierFileName.endsWith(CarbonCommonConstants.FILE_INPROGRESS_STATUS)) {
-        hierFileName = hierFileName.substring(0, hierFileName.lastIndexOf('.'));
-        try {
-          counter = Integer.parseInt(hierFileName.substring(hierFileName.length() - 1));
-        } catch (NumberFormatException nfe) {
-
-          if (new File(hierFileName + '0' + CarbonCommonConstants.LEVEL_FILE_EXTENSION).exists()) {
-            // Need to skip because the server may have gone down while files were
-            // being merged and the other hierarchy files were not deleted, leaving the
-            // current file in progress; the files will be merged again and renamed to a normal file
-            LOGGER.info("Need to skip as this can be case in which hierarchy file already renamed");
-            if (hierFile.delete()) {
-              LOGGER.info("Deleted the Inprogress hierarchy Files.");
-            }
-          } else {
-            // levelfileName0.level file does not exist, which means the files were merged
-            // and the others were deleted. While renaming this file from in-progress to a
-            // normal file, the server got restarted/killed, so we need to rename the file now.
-
-            File inprogressFile = new File(storeFolder + File.separator + hierFile.getName());
-            File changetoName = new File(storeFolder + File.separator + hierFileName);
-
-            if (inprogressFile.renameTo(changetoName)) {
-              LOGGER.info(
-                  "Renaming the level Files while creating the new instance on server startup.");
-            }
-
-          }
-
-        }
-      }
-
-      String val = hierFileName.substring(hierFileName.length() - 1);
-
-      int parsedVal = getIntValue(val);
-
-      if (counter < parsedVal) {
-        counter = parsedVal;
-      }
-    }
-    counter++;
-  }
-
-  private int getIntValue(String val) {
-    int parsedVal = 0;
-    try {
-      parsedVal = Integer.parseInt(val);
-    } catch (NumberFormatException nfe) {
-      LOGGER.info("Hierarchy File is already renamed so there will not be"
-              + "any need to keep the counter");
-    }
-    return parsedVal;
-  }
-
-  private void intialize() throws KettleException {
-    intialized = true;
-
-    File f = new File(storeFolderLocation + File.separator + hierarchyName + counter
-        + CarbonCommonConstants.FILE_INPROGRESS_STATUS);
-
-    counter++;
-
-    FileOutputStream fos = null;
-
-    boolean isFileCreated = false;
-    if (!f.exists()) {
-      try {
-        isFileCreated = f.createNewFile();
-
-      } catch (IOException e) {
-        //not required: findbugs fix
-        throw new KettleException("unable to create member mapping file", e);
-      }
-      if (!isFileCreated) {
-        throw new KettleException("unable to create file" + f.getAbsolutePath());
-      }
-    }
-
-    try {
-      fos = new FileOutputStream(f);
-
-      outPutFileChannel = fos.getChannel();
-    } catch (FileNotFoundException e) {
-      closeStreamAndDeleteFile(f, outPutFileChannel, fos);
-      throw new KettleException("member Mapping File not found to write mapping info", e);
-    }
-  }
-
-  public void writeIntoHierarchyFile(byte[] bytes, int primaryKey) throws KettleException {
-    if (!intialized) {
-      intialize();
-    }
-
-    ByteBuffer byteBuffer = storeValueInCache(bytes, primaryKey);
-
-    try {
-      byteBuffer.flip();
-      outPutFileChannel.write(byteBuffer);
-    } catch (IOException e) {
-      throw new KettleException("Error while writting in the hierarchy mapping file", e);
-    }
-  }
-
-  private ByteBuffer storeValueInCache(byte[] bytes, int primaryKey) {
-
-    // allocate 4 extra bytes for the primary key appended after the mdkey bytes
-    ByteBuffer buffer = ByteBuffer.allocate(bytes.length + 4);
-
-    buffer.put(bytes);
-    buffer.putInt(primaryKey);
-
-    return buffer;
-  }
-
-  public void performRequiredOperation() throws KettleException {
-    if (byteArrayholder.size() == 0) {
-      return;
-    }
-    //write to the file and close the stream.
-    Collections.sort(byteArrayholder);
-
-    for (ByteArrayHolder byteArray : byteArrayholder) {
-      writeIntoHierarchyFile(byteArray.getMdKey(), byteArray.getPrimaryKey());
-    }
-
-    CarbonUtil.closeStreams(outPutFileChannel);
-
-    //rename the inprogress file to normal .level file
-    String filePath = this.storeFolderLocation + File.separator + hierarchyName + (counter - 1)
-        + CarbonCommonConstants.FILE_INPROGRESS_STATUS;
-    File inProgressFile = new File(filePath);
-    String inprogressFileName = inProgressFile.getName();
-
-    String changedFileName = inprogressFileName.substring(0, inprogressFileName.lastIndexOf('.'));
-
-    File orgFinalName = new File(this.storeFolderLocation + File.separator + changedFileName);
-
-    if (!inProgressFile.renameTo(orgFinalName)) {
-      LOGGER.error("Not able to rename file : " + inprogressFileName);
-    }
-
-    //create the new outputStream
-    try {
-      intialize();
-    } catch (KettleException e) {
-      LOGGER.error("Not able to create output stream for file:" + hierarchyName + (counter - 1));
-    }
-
-    //clear the byte array holder also.
-    byteArrayholder.clear();
-  }
-
-  private void closeStreamAndDeleteFile(File f, Closeable... streams) throws KettleException {
-    boolean isDeleted = false;
-    for (Closeable stream : streams) {
-      if (null != stream) {
-        try {
-          stream.close();
-        } catch (IOException e) {
-          LOGGER.error(e, "unable to close the stream ");
-        }
-
-      }
-    }
-
-    // delete the file
-    isDeleted = f.delete();
-    if (!isDeleted) {
-      LOGGER.error("Unable to delete the file " + f.getAbsolutePath());
-    }
-
-  }
-
-  public String getHierarchyName() {
-    return hierarchyName;
-  }
-
-  public int getCounter() {
-    return counter;
-  }
-
-}
-

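The row format produced by storeValueInCache above is simply the mdkey bytes followed by
the 4-byte primary key. A self-contained sketch of the same encoding:

    import java.nio.ByteBuffer;

    // layout: [ mdKey bytes ... ][ 4-byte int primaryKey ]
    static ByteBuffer encodeHierarchyRow(byte[] mdKey, int primaryKey) {
      ByteBuffer buffer = ByteBuffer.allocate(mdKey.length + 4);
      buffer.put(mdKey);         // mdkey bytes first
      buffer.putInt(primaryKey); // primary key appended at the end
      buffer.flip();             // ready for FileChannel.write
      return buffer;
    }
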
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/writer/ThriftWriter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/writer/ThriftWriter.java b/core/src/main/java/org/carbondata/core/writer/ThriftWriter.java
deleted file mode 100644
index 2c5ee1d..0000000
--- a/core/src/main/java/org/carbondata/core/writer/ThriftWriter.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.writer;
-
-import java.io.DataOutputStream;
-import java.io.IOException;
-
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-import org.carbondata.core.util.CarbonUtil;
-
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.thrift.TBase;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.transport.TIOStreamTransport;
-
-/**
- * Simple class that makes it easy to write Thrift objects to disk.
- */
-public class ThriftWriter {
-
-  /**
-   * buffer size
-   */
-  private static final int bufferSize = 2048;
-
-  /**
-   * File to write to.
-   */
-  private String fileName;
-
-  /**
-   * For writing to the file.
-   */
-  private DataOutputStream dataOutputStream;
-
-  /**
-   * For binary serialization of objects.
-   */
-  private TProtocol binaryOut;
-
-  /**
-   * flag to append to existing file
-   */
-  private boolean append;
-
-  /**
-   * Constructor.
-   */
-  public ThriftWriter(String fileName, boolean append) {
-    this.fileName = fileName;
-    this.append = append;
-  }
-
-  /**
-   * Open the file for writing.
-   */
-  public void open() throws IOException {
-    FileFactory.FileType fileType = FileFactory.getFileType(fileName);
-    dataOutputStream = FileFactory.getDataOutputStream(fileName, fileType, bufferSize, append);
-    binaryOut = new TCompactProtocol(new TIOStreamTransport(dataOutputStream));
-  }
-
-  /**
-   * Write the object to disk.
-   */
-  public void write(TBase t) throws IOException {
-    try {
-      t.write(binaryOut);
-      dataOutputStream.flush();
-    } catch (TException e) {
-      throw new IOException(e);
-    }
-  }
-
-  /**
-   * Write the offset to the file
-   *
-   * @param offset
-   * @throws IOException
-   */
-  public void writeOffset(long offset) throws IOException {
-    dataOutputStream.writeLong(offset);
-  }
-
-  /**
-   * Close the file stream.
-   */
-  public void close() {
-    CarbonUtil.closeStreams(dataOutputStream);
-  }
-
-  /**
-   * Flush data to HDFS file
-   */
-  public void sync() throws IOException {
-    if (dataOutputStream instanceof FSDataOutputStream) {
-      ((FSDataOutputStream) dataOutputStream).hsync();
-    }
-  }
-}

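Typical ThriftWriter usage, as seen in the writers above (a sketch; any thrift-generated
TBase works):

    import java.io.IOException;
    import org.apache.thrift.TBase;

    static void writeThriftObject(String path, TBase thriftObject) throws IOException {
      ThriftWriter writer = new ThriftWriter(path, false); // false = overwrite, true = append
      writer.open();                                       // opens the stream and TCompactProtocol
      try {
        writer.write(thriftObject);                        // serialize and flush
      } finally {
        writer.close();
      }
    }
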
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/writer/exception/CarbonDataWriterException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/writer/exception/CarbonDataWriterException.java b/core/src/main/java/org/carbondata/core/writer/exception/CarbonDataWriterException.java
deleted file mode 100644
index 1e9ee18..0000000
--- a/core/src/main/java/org/carbondata/core/writer/exception/CarbonDataWriterException.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.writer.exception;
-
-import java.util.Locale;
-
-public class CarbonDataWriterException extends Exception {
-
-  /**
-   * default serial version ID.
-   */
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * The Error message.
-   */
-  private String msg = "";
-
-  /**
-   * Constructor
-   *
-   * @param msg The error message for this exception.
-   */
-  public CarbonDataWriterException(String msg) {
-    super(msg);
-    this.msg = msg;
-  }
-
-  /**
-   * Constructor
-   *
-   * @param msg The error message for this exception.
-   */
-  public CarbonDataWriterException(String msg, Throwable t) {
-    super(msg, t);
-    this.msg = msg;
-  }
-
-  /**
-   * getLocalizedMessage
-   */
-  @Override public String getLocalizedMessage() {
-    return super.getLocalizedMessage();
-  }
-
-  /**
-   * getMessage
-   */
-  public String getMessage() {
-    return this.msg;
-  }
-
-  /**
-   * This method is used to get the localized message.
-   *
-   * @param locale - A Locale object represents a specific geographical,
-   *               political, or cultural region.
-   * @return - Localized error message.
-   */
-  public String getLocalizedMessage(Locale locale) {
-    return "";
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriter.java b/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriter.java
deleted file mode 100644
index 385efbe..0000000
--- a/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriter.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.writer.sortindex;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Interface for writing the dictionary sort index and inverted sort index data.
- */
-public interface CarbonDictionarySortIndexWriter extends Closeable {
-
-  /**
-   * The method is used to write the dictionary sortIndex data to the column's
-   * sortedIndex file in thrift format.
-   *
-   * @param sortIndexList list of sortIndex
-   * @throws IOException in case any I/O error occurs.
-   */
-  public void writeSortIndex(List<Integer> sortIndexList) throws IOException;
-
-  /**
-   * The method is used to write the dictionary sortIndexInverted data to the
-   * column's sortedIndex file in thrift format.
-   *
-   * @param invertedSortIndexList list of sortIndexInverted
-   * @throws IOException in case any I/O error occurs.
-   */
-  public void writeInvertedSortIndex(List<Integer> invertedSortIndexList) throws IOException;
-
-}

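A worked example of the two lists (the convention is inferred from the names, so treat it
as an assumption): with dictionary values assigned surrogate keys in insertion order, the
sort index maps sorted position to surrogate key, and the inverted list maps surrogate key
to sorted position:

    static void writeIndexes(CarbonDictionarySortIndexWriter writer) throws java.io.IOException {
      // surrogate 1 = "cherry", 2 = "apple", 3 = "banana"; sorted order: apple, banana, cherry
      java.util.List<Integer> sortIndex = java.util.Arrays.asList(2, 3, 1); // sorted pos -> surrogate
      java.util.List<Integer> inverted = java.util.Arrays.asList(3, 1, 2);  // surrogate -> sorted pos
      writer.writeSortIndex(sortIndex);
      writer.writeInvertedSortIndex(inverted);
      writer.close(); // the implementation that follows performs the actual file write on close
    }
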
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriterImpl.java b/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriterImpl.java
deleted file mode 100644
index 9c398cb..0000000
--- a/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriterImpl.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.writer.sortindex;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.List;
-
-import org.carbondata.common.factory.CarbonCommonFactory;
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.carbon.path.CarbonTablePath;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.filesystem.CarbonFile;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-import org.carbondata.core.service.PathService;
-import org.carbondata.core.util.CarbonProperties;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.core.writer.ThriftWriter;
-import org.carbondata.format.ColumnSortInfo;
-
-/**
- * The class responsible for writing the dictionary/column sort index and sort index inverted data
- * in the thrift format
- */
-public class CarbonDictionarySortIndexWriterImpl implements CarbonDictionarySortIndexWriter {
-
-  /**
-   * carbonTable Identifier holding the info of databaseName and tableName
-   */
-  protected CarbonTableIdentifier carbonTableIdentifier;
-
-  /**
-   * column name
-   */
-  protected ColumnIdentifier columnIdentifier;
-
-  /**
-   * carbon store location
-   */
-  protected String carbonStorePath;
-  /**
-   * Path of dictionary sort index file for which the sortIndex to be written
-   */
-  protected String sortIndexFilePath;
-  /**
-   * Instance of thrift writer to write the data
-   */
-  private ThriftWriter sortIndexThriftWriter;
-
-  /**
-   * Column sort info thrift instance.
-   */
-  private ColumnSortInfo columnSortInfo = new ColumnSortInfo();
-
-  /**
-   * Comment for <code>LOGGER</code>
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(CarbonDictionarySortIndexWriterImpl.class.getName());
-
-  /**
-   * @param carbonStorePath       Carbon store path
-   * @param carbonTableIdentifier table identifier which will give table name and database name
-   * @param columnIdentifier      column unique identifier
-   */
-  public CarbonDictionarySortIndexWriterImpl(final CarbonTableIdentifier carbonTableIdentifier,
-      final ColumnIdentifier columnIdentifier, final String carbonStorePath) {
-    this.carbonTableIdentifier = carbonTableIdentifier;
-    this.columnIdentifier = columnIdentifier;
-    this.carbonStorePath = carbonStorePath;
-  }
-
-  /**
-   * The method is used to populate the dictionary sortIndex data into columnSortInfo
-   * in thrift format.
-   *
-   * @param sortIndexList list of sortIndex
-   * @throws IOException in case any I/O error occurs.
-   */
-  @Override public void writeSortIndex(List<Integer> sortIndexList) throws IOException {
-    columnSortInfo.setSort_index(sortIndexList);
-  }
-
-  /**
-   * The method is used to populate the dictionary inverted sortIndex data into
-   * columnSortInfo in thrift format.
-   *
-   * @param invertedSortIndexList list of sortIndexInverted
-   * @throws IOException in case any I/O error occurs.
-   */
-  @Override public void writeInvertedSortIndex(List<Integer> invertedSortIndexList)
-      throws IOException {
-    columnSortInfo.setSort_index_inverted(invertedSortIndexList);
-  }
-
-  /**
-   * Initialize the sortIndexFilePath, open the thrift writer stream for the
-   * dictionary sortIndex file, and write the column sort info to the store when
-   * both the sort index and the inverted sort index are populated.
-   * An existing sort index file is overwritten with the new sort index data.
-   * A columnSortInfo with a null sortIndex or invertedSortIndex will not be written.
-   */
-  private void writeColumnSortInfo() throws IOException {
-    boolean isNotNull =
-        null != columnSortInfo.getSort_index() && null != columnSortInfo.sort_index_inverted;
-    if (isNotNull) {
-      initPath();
-      String folderContainingFile = CarbonTablePath.getFolderContainingFile(this.sortIndexFilePath);
-      boolean created = CarbonUtil.checkAndCreateFolder(folderContainingFile);
-      if (!created) {
-        LOGGER.error("Database metadata folder creation status :: " + created);
-        throw new IOException("Failed to created database metadata folder");
-      }
-      try {
-
-        this.sortIndexThriftWriter = new ThriftWriter(this.sortIndexFilePath, false);
-        this.sortIndexThriftWriter.open();
-        sortIndexThriftWriter.write(columnSortInfo);
-      } catch (IOException ie) {
-        LOGGER.error(ie,
-            "problem while writing the dictionary sort index file.");
-        throw new IOException("problem while writing the dictionary sort index file.", ie);
-      } finally {
-        if (null != sortIndexThriftWriter) {
-          this.sortIndexThriftWriter.close();
-        }
-        this.sortIndexFilePath = null;
-      }
-    }
-  }
-
-  protected void initPath() {
-    PathService pathService = CarbonCommonFactory.getPathService();
-    CarbonTablePath carbonTablePath = pathService
-        .getCarbonTablePath(columnIdentifier, carbonStorePath, carbonTableIdentifier);
-    String dictionaryPath = carbonTablePath.getDictionaryFilePath(columnIdentifier.getColumnId());
-    long dictOffset = CarbonUtil.getFileSize(dictionaryPath);
-    this.sortIndexFilePath =
-        carbonTablePath.getSortIndexFilePath(columnIdentifier.getColumnId(), dictOffset);
-    cleanUpOldSortIndex(carbonTablePath, dictionaryPath);
-  }
-
-  /**
-   * It cleans up old unused sortindex files
-   *
-   * @param carbonTablePath carbon table path
-   * @param dictPath        dictionary file path
-   */
-  protected void cleanUpOldSortIndex(CarbonTablePath carbonTablePath, String dictPath) {
-    CarbonFile dictFile =
-        FileFactory.getCarbonFile(dictPath, FileFactory.getFileType(dictPath));
-    CarbonFile[] files =
-        carbonTablePath.getSortIndexFiles(dictFile.getParentFile(),
-            columnIdentifier.getColumnId());
-    int maxTime;
-    try {
-      maxTime = Integer.parseInt(CarbonProperties.getInstance()
-          .getProperty(CarbonCommonConstants.MAX_QUERY_EXECUTION_TIME));
-    } catch (NumberFormatException e) {
-      maxTime = CarbonCommonConstants.DEFAULT_MAX_QUERY_EXECUTION_TIME;
-    }
-    if (null != files) {
-      Arrays.sort(files, new Comparator<CarbonFile>() {
-        @Override public int compare(CarbonFile o1, CarbonFile o2) {
-          return o1.getName().compareTo(o2.getName());
-        }
-      });
-      for (int i = 0; i < files.length - 1; i++) {
-        long difference = System.currentTimeMillis() - files[i].getLastModifiedTime();
-        long minutesElapsed = (difference / (1000 * 60));
-        if (minutesElapsed > maxTime) {
-          if (!files[i].delete()) {
-            LOGGER.warn("Failed to delete sortindex file." + files[i].getAbsolutePath());
-          } else {
-            LOGGER.info("Sort index file is deleted." + files[i].getAbsolutePath());
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated
-   * with it. If the stream is already closed then invoking this
-   * method has no effect.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override public void close() throws IOException {
-    writeColumnSortInfo();
-    if (null != sortIndexThriftWriter) {
-      sortIndexThriftWriter.close();
-    }
-  }
-}
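
The cleanup above treats a sort index file as stale once more minutes have elapsed since its last modification than the configured maximum query execution time. A minimal standalone sketch of just that staleness check, using plain java.io.File instead of the CarbonFile/FileFactory abstraction (an illustrative assumption, not the actual implementation):

import java.io.File;

public final class SortIndexStalenessCheck {

  // Mirrors the age computation in cleanUpOldSortIndex(): elapsed
  // milliseconds are converted to whole minutes and compared against
  // the configured maximum query execution time in minutes.
  static boolean isStale(File sortIndexFile, int maxQueryMinutes) {
    long elapsedMillis = System.currentTimeMillis() - sortIndexFile.lastModified();
    long minutesElapsed = elapsedMillis / (1000 * 60);
    return minutesElapsed > maxQueryMinutes;
  }

  public static void main(String[] args) {
    // hypothetical sort index file named <columnId>_<dictOffset>.sortindex
    File file = new File("col-1_100.sortindex");
    System.out.println("stale: " + isStale(file, 60));
  }
}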


[19/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/CarbonDimension.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/CarbonDimension.java b/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/CarbonDimension.java
deleted file mode 100644
index d3df181..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/CarbonDimension.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.carbon.metadata.schema.table.column;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-
-public class CarbonDimension extends CarbonColumn {
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = 3648269871656322681L;
-
-  /**
-   * List of child dimension for complex type
-   */
-  private List<CarbonDimension> listOfChildDimensions;
-
-  /**
-   * in case of dictionary dimension this will store the ordinal
-   * of the dimension in mdkey
-   */
-  private int keyOrdinal;
-
-  /**
-   * column group column ordinal
-   * for example if column is second column in the group
-   * it will store 2
-   */
-  private int columnGroupOrdinal;
-
-  /**
-   * to store complex type dimension ordinal
-   */
-  private int complexTypeOrdinal;
-
-  public CarbonDimension(ColumnSchema columnSchema, int ordinal, int keyOrdinal,
-      int columnGroupOrdinal, int complexTypeOrdinal) {
-    super(columnSchema, ordinal);
-    this.keyOrdinal = keyOrdinal;
-    this.columnGroupOrdinal = columnGroupOrdinal;
-    this.complexTypeOrdinal = complexTypeOrdinal;
-  }
-
-  /**
-   * this method will initialize the list based on the number of child dimensions
-   */
-  public void initializeChildDimensionsList(int childDimension) {
-    listOfChildDimensions = new ArrayList<CarbonDimension>(childDimension);
-  }
-
-  /**
-   * @return number of children for complex type
-   */
-  public int getNumberOfChild() {
-    return columnSchema.getNumberOfChild();
-  }
-
-  /**
-   * @return list of children dims for complex type
-   */
-  public List<CarbonDimension> getListOfChildDimensions() {
-    return listOfChildDimensions;
-  }
-
-  /**
-   * @return the number of children present in case of complex type
-   */
-  public int numberOfChild() {
-    return columnSchema.getNumberOfChild();
-  }
-
-  public boolean hasEncoding(Encoding encoding) {
-    return columnSchema.getEncodingList().contains(encoding);
-  }
-
-  /**
-   * @return the keyOrdinal
-   */
-  public int getKeyOrdinal() {
-    return keyOrdinal;
-  }
-
-  /**
-   * @return the columnGroupOrdinal
-   */
-  public int getColumnGroupOrdinal() {
-    return columnGroupOrdinal;
-  }
-
-  /**
-   * @return the complexTypeOrdinal
-   */
-  public int getComplexTypeOrdinal() {
-    return complexTypeOrdinal;
-  }
-
-  public void setComplexTypeOridnal(int complexTypeOrdinal) {
-    this.complexTypeOrdinal = complexTypeOrdinal;
-  }
-
-  /**
-   * to generate the hash code for this class
-   */
-  @Override public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + ((columnSchema == null) ? 0 : columnSchema.hashCode());
-    return result;
-  }
-
-  /**
-   * to check whether two dimensions are equal or not
-   */
-  @Override public boolean equals(Object obj) {
-    if (this == obj) {
-      return true;
-    }
-    if (obj == null) {
-      return false;
-    }
-    if (!(obj instanceof CarbonDimension)) {
-      return false;
-    }
-    CarbonDimension other = (CarbonDimension) obj;
-    if (columnSchema == null) {
-      if (other.columnSchema != null) {
-        return false;
-      }
-    } else if (!columnSchema.equals(other.columnSchema)) {
-      return false;
-    }
-    return true;
-  }
-}
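
For reference, a small sketch of constructing a CarbonDimension from a ColumnSchema (the schema class appears later in this diff) and reading it back. The column name, unique id, and the use of DataType.STRING and Encoding.DICTIONARY are illustrative assumptions:

import java.util.Arrays;

import org.carbondata.core.carbon.metadata.datatype.DataType;
import org.carbondata.core.carbon.metadata.encoder.Encoding;
import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;

public class CarbonDimensionExample {
  public static void main(String[] args) {
    ColumnSchema schema = new ColumnSchema();
    schema.setColumnName("country");           // hypothetical column
    schema.setColumnUniqueId("col-1");         // hypothetical unique id
    schema.setDimensionColumn(true);
    schema.setDataType(DataType.STRING);       // assumes STRING is a DataType value
    schema.setEncodingList(Arrays.asList(Encoding.DICTIONARY));

    // ordinal = 0, keyOrdinal = 0, no column group, not a complex type
    CarbonDimension dimension = new CarbonDimension(schema, 0, 0, -1, -1);
    System.out.println(dimension.hasEncoding(Encoding.DICTIONARY)); // true
    System.out.println(dimension.getKeyOrdinal());                  // 0
  }
}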

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/CarbonMeasure.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/CarbonMeasure.java b/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/CarbonMeasure.java
deleted file mode 100644
index 6a3608a..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/CarbonMeasure.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.carbon.metadata.schema.table.column;
-
-/**
- * class represent column(measure) in table
- */
-public class CarbonMeasure extends CarbonColumn {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = 354341488059013977L;
-
-  /**
-   * aggregator chosen for measure
-   */
-  private String aggregateFunction;
-
-  /**
-   * Used when this column contains decimal data.
-   */
-  private int scale;
-
-  /**
-   * precision in decimal data
-   */
-  private int precision;
-
-  public CarbonMeasure(ColumnSchema columnSchema, int ordinal) {
-    super(columnSchema, ordinal);
-    this.scale = columnSchema.getScale();
-    this.precision = columnSchema.getPrecision();
-  }
-
-  /**
-   * @return the scale
-   */
-  public int getScale() {
-    return scale;
-  }
-
-  /**
-   * @return the precision
-   */
-  public int getPrecision() {
-    return precision;
-  }
-
-  /**
-   * @return the aggregator
-   */
-  public String getAggregateFunction() {
-    return aggregateFunction;
-  }
-
-  /**
-   * @param aggregateFunction the aggregateFunction to set
-   */
-  public void setAggregateFunction(String aggregateFunction) {
-    this.aggregateFunction = aggregateFunction;
-  }
-
-  /**
-   * to check whether two measures are equal or not
-   */
-  @Override public boolean equals(Object obj) {
-    if (this == obj) {
-      return true;
-    }
-    if (obj == null) {
-      return false;
-    }
-    if (!(obj instanceof CarbonMeasure)) {
-      return false;
-    }
-    CarbonMeasure other = (CarbonMeasure) obj;
-    if (columnSchema == null) {
-      if (other.columnSchema != null) {
-        return false;
-      }
-    } else if (!columnSchema.equals(other.columnSchema)) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * hash code
-   * @return
-   */
-  @Override public int hashCode() {
-    return this.getColumnSchema().getColumnUniqueId().hashCode();
-  }
-}
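
Correspondingly, a sketch of building a decimal CarbonMeasure: scale and precision are copied from the ColumnSchema in the constructor, while the aggregate function is set separately. The values and the use of DataType.DECIMAL are assumptions for illustration:

import org.carbondata.core.carbon.metadata.datatype.DataType;
import org.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;

public class CarbonMeasureExample {
  public static void main(String[] args) {
    ColumnSchema schema = new ColumnSchema();
    schema.setColumnName("revenue");       // hypothetical measure column
    schema.setDimensionColumn(false);
    schema.setDataType(DataType.DECIMAL);  // assumes DECIMAL is a DataType value
    schema.setScale(2);
    schema.setPrecision(10);

    CarbonMeasure measure = new CarbonMeasure(schema, 0);
    measure.setAggregateFunction("sum");
    System.out.println(measure.getScale());     // 2
    System.out.println(measure.getPrecision()); // 10
  }
}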

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/ColumnSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/ColumnSchema.java b/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/ColumnSchema.java
deleted file mode 100644
index cc0bce9..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/ColumnSchema.java
+++ /dev/null
@@ -1,418 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.schema.table.column;
-
-import java.io.Serializable;
-import java.util.List;
-import java.util.Map;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-
-/**
- * Stores the information about the column metadata present in the table
- */
-public class ColumnSchema implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = 7676766554874863763L;
-
-  /**
-   * dataType
-   */
-  private DataType dataType;
-  /**
-   * Name of the column. If it is a complex data type, we follow a naming rule
-   * grand_parent_column.parent_column.child_column
-   * For Array types, two columns will be stored one for
-   * the array type and one for the primitive type with
-   * the name parent_column.value
-   */
-  private String columnName;
-
-  /**
-   * Unique ID for a column. If this is a dimension,
-   * it is a unique ID that is used in the dictionary
-   */
-  private String columnUniqueId;
-
-  /**
-   * column reference id
-   */
-  private String columnReferenceId;
-
-  /**
-   * whether it is stored as columnar format or row format
-   */
-  private boolean isColumnar = true;
-
-  /**
-   * List of encoding that are chained to encode the data for this column
-   */
-  private List<Encoding> encodingList;
-
-  /**
-   * Whether the column is a dimension or measure
-   */
-  private boolean isDimensionColumn;
-
-  /**
-   * Whether the column should use inverted index
-   */
-  private boolean useInvertedIndex;
-
-  /**
-   * The group ID for the column, used for row format columns,
-   * wherein the columns in each group are chunked together.
-   */
-  private int columnGroupId = -1;
-
-  /**
-   * Used when this column contains decimal data.
-   */
-  private int scale;
-
-  private int precision;
-
-  /**
-   * Nested fields.  Since thrift does not support nested fields,
-   * the nesting is flattened to a single list by a depth-first traversal.
-   * The children count is used to construct the nested relationship.
-   * This field is not set when the element is a primitive type
-   */
-  private int numberOfChild;
-
-  /**
-   * Used when this column is part of an aggregate function.
-   */
-  private String aggregateFunction;
-
-  /**
-   * used in case of schema restructuring
-   */
-  private byte[] defaultValue;
-
-  /**
-   * Column properties
-   */
-  private Map<String, String> columnProperties;
-
-  /**
-   * used to define the visibility of the column; default is false
-   */
-  private boolean invisible = false;
-
-  /**
-   * @return the columnName
-   */
-  public String getColumnName() {
-    return columnName;
-  }
-
-  /**
-   * @param columnName the columnName to set
-   */
-  public void setColumnName(String columnName) {
-    this.columnName = columnName;
-  }
-
-  /**
-   * @return the columnUniqueId
-   */
-  public String getColumnUniqueId() {
-    return columnUniqueId;
-  }
-
-  /**
-   * @param columnUniqueId the columnUniqueId to set
-   */
-  public void setColumnUniqueId(String columnUniqueId) {
-    this.columnUniqueId = columnUniqueId;
-  }
-
-  /**
-   * @return the isColumnar
-   */
-  public boolean isColumnar() {
-    return isColumnar;
-  }
-
-  /**
-   * @param isColumnar the isColumnar to set
-   */
-  public void setColumnar(boolean isColumnar) {
-    this.isColumnar = isColumnar;
-  }
-
-  /**
-   * @return the isDimensionColumn
-   */
-  public boolean isDimensionColumn() {
-    return isDimensionColumn;
-  }
-
-  /**
-   * @param isDimensionColumn the isDimensionColumn to set
-   */
-  public void setDimensionColumn(boolean isDimensionColumn) {
-    this.isDimensionColumn = isDimensionColumn;
-  }
-
-  /**
-   * @return the useInvertedIndex
-   */
-  public boolean isUseInvertedIndex() {
-    return useInvertedIndex;
-  }
-
-  /**
-   * @param useInvertedIndex the useInvertedIndex to set
-   */
-  public void setUseInvertedIndex(boolean useInvertedIndex) {
-    this.useInvertedIndex = useInvertedIndex;
-  }
-
-  /**
-   * @return the columnGroup
-   */
-  public int getColumnGroupId() {
-    return columnGroupId;
-  }
-
-  /**
-   * @param columnGroupId the columnGroupId to set
-   */
-  public void setColumnGroup(int columnGroupId) {
-    this.columnGroupId = columnGroupId;
-  }
-
-  /**
-   * @return the scale
-   */
-  public int getScale() {
-    return scale;
-  }
-
-  /**
-   * @param scale the scale to set
-   */
-  public void setScale(int scale) {
-    this.scale = scale;
-  }
-
-  /**
-   * @return the precision
-   */
-  public int getPrecision() {
-    return precision;
-  }
-
-  /**
-   * @param precision the precision to set
-   */
-  public void setPrecision(int precision) {
-    this.precision = precision;
-  }
-
-  /**
-   * @return the numberOfChild
-   */
-  public int getNumberOfChild() {
-    return numberOfChild;
-  }
-
-  /**
-   * @param numberOfChild the numberOfChild to set
-   */
-  public void setNumberOfChild(int numberOfChild) {
-    this.numberOfChild = numberOfChild;
-  }
-
-  /**
-   * @return the aggregator
-   */
-  public String getAggregateFunction() {
-    return aggregateFunction;
-  }
-
-  /**
-   * @param aggregateFunction the aggregator to set
-   */
-  public void setAggregateFunction(String aggregateFunction) {
-    this.aggregateFunction = aggregateFunction;
-  }
-
-  /**
-   * @return the defaultValue
-   */
-  public byte[] getDefaultValue() {
-    return defaultValue;
-  }
-
-  /**
-   * @param defaultValue the defaultValue to set
-   */
-  public void setDefaultValue(byte[] defaultValue) {
-    this.defaultValue = defaultValue;
-  }
-
-  /**
-   * hash code method.
-   * For generating the hash code only the column name is considered
-   */
-  @Override public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + ((columnName == null) ? 0 : columnName.hashCode());
-    return result;
-  }
-
-  /**
-   * Overridden equals method for columnSchema
-   */
-  @Override public boolean equals(Object obj) {
-    if (this == obj) {
-      return true;
-    }
-    if (obj == null) {
-      return false;
-    }
-    if (!(obj instanceof ColumnSchema)) {
-      return false;
-    }
-    ColumnSchema other = (ColumnSchema) obj;
-    if (columnName == null) {
-      if (other.columnName != null) {
-        return false;
-      }
-    } else if (!columnName.equals(other.columnName)) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * @return the dataType
-   */
-  public DataType getDataType() {
-    return dataType;
-  }
-
-  /**
-   * @param dataType the dataType to set
-   */
-  public void setDataType(DataType dataType) {
-    this.dataType = dataType;
-  }
-
-  /**
-   * @return the encoderList
-   */
-  public List<Encoding> getEncodingList() {
-    return encodingList;
-  }
-
-  /**
-   * @param encoderList the encoderList to set
-   */
-  public void setEncodingList(List<Encoding> encodingList) {
-    this.encodingList = encodingList;
-  }
-
-  /**
-   * @param encoding
-   * @return true if the column has the passed encoding
-   */
-  public boolean hasEncoding(Encoding encoding) {
-    if (encodingList == null || encodingList.isEmpty()) {
-      return false;
-    } else {
-      return encodingList.contains(encoding);
-    }
-  }
-
-  /**
-   * @return true if the DataType is ARRAY or STRUCT, false otherwise
-   */
-  public Boolean isComplex() {
-    return DataType.ARRAY.equals(this.getDataType())
-        || DataType.STRUCT.equals(this.getDataType());
-  }
-
-  /**
-   * @param columnProperties
-   */
-  public void setColumnProperties(Map<String, String> columnProperties) {
-    this.columnProperties = columnProperties;
-  }
-
-  /**
-   * @param property
-   * @return
-   */
-  public String getColumnProperty(String property) {
-    if (null != columnProperties) {
-      return columnProperties.get(property);
-    }
-    return null;
-  }
-
-  /**
-   * @return the column properties
-   */
-  public Map<String, String> getColumnProperties() {
-    return columnProperties;
-  }
-  /**
-   * @return the visibility
-   */
-  public boolean isInvisible() {
-    return invisible;
-  }
-
-  /**
-   * set the visibility
-   * @param invisible
-   */
-  public void setInvisible(boolean invisible) {
-    this.invisible = invisible;
-  }
-
-  /**
-   * @return columnReferenceId
-   */
-  public String getColumnReferenceId() {
-    return columnReferenceId;
-  }
-
-  /**
-   * @param columnReferenceId
-   */
-  public void setColumnReferenceId(String columnReferenceId) {
-    this.columnReferenceId = columnReferenceId;
-  }
-
-}
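
A short sketch of how isComplex() and hasEncoding() behave for an ARRAY column; the column name is made up, and note that hasEncoding() is null-safe when no encoding list has been set:

import org.carbondata.core.carbon.metadata.datatype.DataType;
import org.carbondata.core.carbon.metadata.encoder.Encoding;
import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;

public class ColumnSchemaExample {
  public static void main(String[] args) {
    ColumnSchema arrayColumn = new ColumnSchema();
    // per the naming rule above, the child would be stored as phone_numbers.value
    arrayColumn.setColumnName("phone_numbers"); // hypothetical column
    arrayColumn.setDimensionColumn(true);
    arrayColumn.setDataType(DataType.ARRAY);
    arrayColumn.setNumberOfChild(1);

    System.out.println(arrayColumn.isComplex());                      // true
    System.out.println(arrayColumn.hasEncoding(Encoding.DICTIONARY)); // false: no encoding list
  }
}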

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/path/CarbonSharedDictionaryPath.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/path/CarbonSharedDictionaryPath.java b/core/src/main/java/org/carbondata/core/carbon/path/CarbonSharedDictionaryPath.java
deleted file mode 100644
index a7c4760..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/path/CarbonSharedDictionaryPath.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.path;
-
-import java.io.File;
-
-/**
- * Helps to get shared dictionary file paths.
- */
-public class CarbonSharedDictionaryPath {
-
-  private static final String SHAREDDIM_DIR = "SharedDictionary";
-  private static final String DICTIONARY_EXT = ".dict";
-  private static final String DICTIONARY_META_EXT = ".dictmeta";
-  private static final String SORT_INDEX_EXT = ".sortindex";
-
-  /***
-   * @param storePath    store path
-   * @param databaseName database name
-   * @param columnId     unique column identifier
-   * @return absolute path of shared dictionary file
-   */
-  public static String getDictionaryFilePath(String storePath, String databaseName,
-      String columnId) {
-    return getSharedDictionaryDir(storePath, databaseName) + File.separator + columnId
-        + DICTIONARY_EXT;
-  }
-
-  /***
-   * @param storePath    store path
-   * @param databaseName database name
-   * @param columnId     unique column identifier
-   * @return absolute path of shared dictionary meta file
-   */
-  public static String getDictionaryMetaFilePath(String storePath, String databaseName,
-      String columnId) {
-    return getSharedDictionaryDir(storePath, databaseName) + File.separator + columnId
-        + DICTIONARY_META_EXT;
-  }
-
-  /***
-   * @param storePath    store path
-   * @param databaseName database name
-   * @param columnId     unique column identifier
-   * @return absolute path of shared dictionary sort index file
-   */
-  public static String getSortIndexFilePath(String storePath, String databaseName,
-      String columnId) {
-    return getSharedDictionaryDir(storePath, databaseName) + File.separator + columnId
-        + SORT_INDEX_EXT;
-  }
-
-  private static String getSharedDictionaryDir(String storePath, String databaseName) {
-    return storePath + File.separator + databaseName + File.separator + SHAREDDIM_DIR;
-  }
-
-}
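
The three public helpers differ only in the extension they append under <storePath>/<databaseName>/SharedDictionary. A usage sketch with a made-up store path and column id (separators shown for a Unix file system):

import org.carbondata.core.carbon.path.CarbonSharedDictionaryPath;

public class SharedDictionaryPathExample {
  public static void main(String[] args) {
    String storePath = "/opt/carbonStore"; // hypothetical store location
    // prints /opt/carbonStore/default/SharedDictionary/col-1.dict
    System.out.println(
        CarbonSharedDictionaryPath.getDictionaryFilePath(storePath, "default", "col-1"));
    // prints /opt/carbonStore/default/SharedDictionary/col-1.sortindex
    System.out.println(
        CarbonSharedDictionaryPath.getSortIndexFilePath(storePath, "default", "col-1"));
  }
}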

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/path/CarbonStorePath.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/path/CarbonStorePath.java b/core/src/main/java/org/carbondata/core/carbon/path/CarbonStorePath.java
deleted file mode 100644
index eac98e6..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/path/CarbonStorePath.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.path;
-
-import java.io.File;
-
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-
-import org.apache.hadoop.fs.Path;
-
-/**
- * Helps to get Store content paths.
- */
-public class CarbonStorePath extends Path {
-
-  private String storePath;
-
-  public CarbonStorePath(String storePathString) {
-    super(storePathString);
-    this.storePath = storePathString;
-  }
-
-  /**
-   * gets CarbonTablePath object to manage table paths
-   */
-  public static CarbonTablePath getCarbonTablePath(String storePath,
-      CarbonTableIdentifier tableIdentifier) {
-    CarbonTablePath carbonTablePath = new CarbonTablePath(tableIdentifier,
-        storePath + File.separator + tableIdentifier.getDatabaseName() + File.separator
-            + tableIdentifier.getTableName());
-
-    return carbonTablePath;
-  }
-
-  /**
-   * gets CarbonTablePath object to manage table paths
-   */
-  public CarbonTablePath getCarbonTablePath(CarbonTableIdentifier tableIdentifier) {
-    return CarbonStorePath.getCarbonTablePath(storePath, tableIdentifier);
-  }
-
-  @Override public boolean equals(Object o) {
-    if (!(o instanceof CarbonStorePath)) {
-      return false;
-    }
-    CarbonStorePath path = (CarbonStorePath)o;
-    return storePath.equals(path.storePath) && super.equals(o);
-  }
-
-  @Override public int hashCode() {
-    return super.hashCode() + storePath.hashCode();
-  }
-}
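
CarbonStorePath composes the table directory as <storePath>/<databaseName>/<tableName> and hands back a CarbonTablePath for everything below it. A minimal sketch that takes the CarbonTableIdentifier as a given, since its constructor is not part of this diff:

import org.carbondata.core.carbon.CarbonTableIdentifier;
import org.carbondata.core.carbon.path.CarbonStorePath;
import org.carbondata.core.carbon.path.CarbonTablePath;

public class StorePathExample {

  // resolves the schema file under <storePath>/<db>/<table>/Metadata/schema
  static String schemaFileFor(String storePath, CarbonTableIdentifier identifier) {
    CarbonTablePath tablePath = CarbonStorePath.getCarbonTablePath(storePath, identifier);
    return tablePath.getSchemaFilePath();
  }
}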

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/path/CarbonTablePath.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/path/CarbonTablePath.java b/core/src/main/java/org/carbondata/core/carbon/path/CarbonTablePath.java
deleted file mode 100644
index a4ff240..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/path/CarbonTablePath.java
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.path;
-
-import java.io.File;
-
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.filesystem.CarbonFile;
-import org.carbondata.core.datastorage.store.filesystem.CarbonFileFilter;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-
-import static org.carbondata.core.constants.CarbonCommonConstants.INVALID_SEGMENT_ID;
-
-import org.apache.hadoop.fs.Path;
-
-
-/**
- * Helps to get Table content paths.
- */
-public class CarbonTablePath extends Path {
-
-  protected static final String METADATA_DIR = "Metadata";
-  protected static final String DICTIONARY_EXT = ".dict";
-  protected static final String DICTIONARY_META_EXT = ".dictmeta";
-  protected static final String SORT_INDEX_EXT = ".sortindex";
-  protected static final String SCHEMA_FILE = "schema";
-  protected static final String TABLE_STATUS_FILE = "tablestatus";
-  protected static final String FACT_DIR = "Fact";
-  protected static final String AGGREGATE_TABLE_PREFIX = "Agg";
-  protected static final String SEGMENT_PREFIX = "Segment_";
-  protected static final String PARTITION_PREFIX = "Part";
-  protected static final String CARBON_DATA_EXT = ".carbondata";
-  protected static final String DATA_PART_PREFIX = "part";
-  protected static final String INDEX_FILE_EXT = ".carbonindex";
-
-  protected String tablePath;
-  protected CarbonTableIdentifier carbonTableIdentifier;
-
-  /**
-   *
-   * @param carbonTableIdentifier
-   * @param tablePathString
-   */
-  public CarbonTablePath(CarbonTableIdentifier carbonTableIdentifier, String tablePathString) {
-    super(tablePathString);
-    this.carbonTableIdentifier = carbonTableIdentifier;
-    this.tablePath = tablePathString;
-  }
-
-  /**
-   * The method returns the folder path containing the carbon file.
-   *
-   * @param carbonFilePath
-   */
-  public static String getFolderContainingFile(String carbonFilePath) {
-    return carbonFilePath.substring(0, carbonFilePath.lastIndexOf(File.separator));
-  }
-
-  /**
-   * @param columnId unique column identifier
-   * @return name of dictionary file
-   */
-  public static String getDictionaryFileName(String columnId) {
-    return columnId + DICTIONARY_EXT;
-  }
-
-  /**
-   * whether carbonFile is dictionary file or not
-   *
-   * @param carbonFile
-   * @return
-   */
-  public static Boolean isDictionaryFile(CarbonFile carbonFile) {
-    return (!carbonFile.isDirectory()) && (carbonFile.getName().endsWith(DICTIONARY_EXT));
-  }
-
-  /**
-   * check if it is carbon data file matching extension
-   *
-   * @param fileNameWithPath
-   * @return boolean
-   */
-  public static boolean isCarbonDataFile(String fileNameWithPath) {
-    int pos = fileNameWithPath.lastIndexOf('.');
-    if (pos != -1) {
-      return fileNameWithPath.substring(pos).startsWith(CARBON_DATA_EXT);
-    }
-    return false;
-  }
-
-  /**
-   * check if it is carbon index file matching extension
-   *
-   * @param fileNameWithPath
-   * @return boolean
-   */
-  public static boolean isCarbonIndexFile(String fileNameWithPath) {
-    int pos = fileNameWithPath.lastIndexOf('.');
-    if (pos != -1) {
-      return fileNameWithPath.substring(pos).startsWith(INDEX_FILE_EXT);
-    }
-    return false;
-  }
-
-  /**
-   * gets table path
-   */
-  public String getPath() {
-    return tablePath;
-  }
-
-  /**
-   * @param columnId unique column identifier
-   * @return absolute path of dictionary file
-   */
-  public String getDictionaryFilePath(String columnId) {
-    return getMetaDataDir() + File.separator + getDictionaryFileName(columnId);
-  }
-
-  /**
-   * @return the relative dictionary directory
-   */
-  public String getRelativeDictionaryDirectory() {
-    return carbonTableIdentifier.getDatabaseName() + File.separator + carbonTableIdentifier
-        .getTableName();
-  }
-
-  /**
-   * This method will return the metadata directory location for a table
-   *
-   * @return absolute path of the metadata directory
-   */
-  public String getMetadataDirectoryPath() {
-    return getMetaDataDir();
-  }
-
-  /**
-   * @param columnId unique column identifier
-   * @return absolute path of dictionary meta file
-   */
-  public String getDictionaryMetaFilePath(String columnId) {
-    return getMetaDataDir() + File.separator + columnId + DICTIONARY_META_EXT;
-  }
-
-  /**
-   * @param columnId unique column identifier
-   * @return absolute path of sort index file
-   */
-  public String getSortIndexFilePath(String columnId) {
-    return getMetaDataDir() + File.separator + columnId + SORT_INDEX_EXT;
-  }
-
-  /**
-   *
-   * @param columnId
-   * @param dictOffset
-   * @return absolute path of sort index file with appended dictionary offset
-   */
-  public String getSortIndexFilePath(String columnId, long dictOffset) {
-    return getMetaDataDir() + File.separator + columnId + "_" + dictOffset + SORT_INDEX_EXT;
-  }
-
-  /**
-   * @return absolute path of schema file
-   */
-  public String getSchemaFilePath() {
-    return getMetaDataDir() + File.separator + SCHEMA_FILE;
-  }
-
-  /**
-   * @return absolute path of table status file
-   */
-  public String getTableStatusFilePath() {
-    return getMetaDataDir() + File.separator + TABLE_STATUS_FILE;
-  }
-
-  /**
-   * Gets absolute path of data file
-   *
-   * @param partitionId         unique partition identifier
-   * @param segmentId           unique segment identifier
-   * @param filePartNo          data file part number
-   * @param factUpdateTimeStamp unique identifier to identify an update
-   * @return absolute path of data file stored in carbon data format
-   */
-  public String getCarbonDataFilePath(String partitionId, String segmentId, Integer filePartNo,
-      Integer taskNo, String factUpdateTimeStamp) {
-    return getSegmentDir(partitionId, segmentId) + File.separator + getCarbonDataFileName(
-        filePartNo, taskNo, factUpdateTimeStamp);
-  }
-
-  /**
-   * Below method will be used to get the index file present in the segment folder
-   * based on task id
-   *
-   * @param taskId      task id of the file
-   * @param partitionId partition number
-   * @param segmentId   segment number
-   * @return full qualified carbon index path
-   */
-  public String getCarbonIndexFilePath(final String taskId, final String partitionId,
-      final String segmentId) {
-    String segmentDir = getSegmentDir(partitionId, segmentId);
-    CarbonFile carbonFile =
-        FileFactory.getCarbonFile(segmentDir, FileFactory.getFileType(segmentDir));
-
-    CarbonFile[] files = carbonFile.listFiles(new CarbonFileFilter() {
-      @Override public boolean accept(CarbonFile file) {
-        return file.getName().startsWith(taskId) && file.getName().endsWith(INDEX_FILE_EXT);
-      }
-    });
-    return files[0].getAbsolutePath();
-  }
-
-  /**
-   * Gets absolute path of data file
-   *
-   * @param partitionId unique partition identifier
-   * @param segmentId   unique segment identifier
-   * @return absolute path of data file stored in carbon data format
-   */
-  public String getCarbonDataDirectoryPath(String partitionId, String segmentId) {
-    return getSegmentDir(partitionId, segmentId);
-  }
-
-  /**
-   * Gets absolute path of data file of given aggregate table
-   *
-   * @param aggTableID          unique aggregate table identifier
-   * @param partitionId         unique partition identifier
-   * @param segmentId           unique segment identifier
-   * @param filePartNo          data file part number
-   * @param factUpdateTimeStamp unique identifier to identify an update
-   * @return absolute path of data file stored in carbon data format
-   */
-  public String getCarbonAggDataFilePath(String aggTableID, String partitionId, String segmentId,
-      Integer filePartNo, Integer taskNo, String factUpdateTimeStamp) {
-    return getAggSegmentDir(aggTableID, partitionId, segmentId) + File.separator
-        + getCarbonDataFileName(filePartNo, taskNo, factUpdateTimeStamp);
-  }
-
-  /**
-   * Gets data file name only, without path
-   *
-   * @param filePartNo          data file part number
-   * @param taskNo              task identifier
-   * @param factUpdateTimeStamp unique identifier to identify an update
-   * @return data file name only, without path
-   */
-  public String getCarbonDataFileName(Integer filePartNo, Integer taskNo,
-      String factUpdateTimeStamp) {
-    return DATA_PART_PREFIX + "-" + filePartNo + "-" + taskNo + "-" + factUpdateTimeStamp
-        + CARBON_DATA_EXT;
-  }
-
-  /**
-   * Below method will be used to get the carbon index filename
-   *
-   * @param taskNo               task number
-   * @param factUpdatedTimeStamp time stamp
-   * @return filename
-   */
-  public String getCarbonIndexFileName(int taskNo, String factUpdatedTimeStamp) {
-    return taskNo + "-" + factUpdatedTimeStamp + INDEX_FILE_EXT;
-  }
-
-  private String getSegmentDir(String partitionId, String segmentId) {
-    return getPartitionDir(partitionId) + File.separator + SEGMENT_PREFIX + segmentId;
-  }
-
-  public String getPartitionDir(String partitionId) {
-    return getFactDir() + File.separator + PARTITION_PREFIX + partitionId;
-  }
-
-  private String getAggSegmentDir(String aggTableID, String partitionId, String segmentId) {
-    return getAggPartitionDir(aggTableID, partitionId) + File.separator + SEGMENT_PREFIX
-        + segmentId;
-  }
-
-  private String getAggPartitionDir(String aggTableID, String partitionId) {
-    return getAggregateTableDir(aggTableID) + File.separator + PARTITION_PREFIX + partitionId;
-  }
-
-  private String getMetaDataDir() {
-    return tablePath + File.separator + METADATA_DIR;
-  }
-
-  public String getFactDir() {
-    return tablePath + File.separator + FACT_DIR;
-  }
-
-  private String getAggregateTableDir(String aggTableId) {
-    return tablePath + File.separator + AGGREGATE_TABLE_PREFIX + aggTableId;
-  }
-
-  @Override public boolean equals(Object o) {
-    if (!(o instanceof CarbonTablePath)) {
-      return false;
-    }
-    CarbonTablePath path = (CarbonTablePath) o;
-    return tablePath.equals(path.tablePath) && super.equals(o);
-  }
-
-  @Override public int hashCode() {
-    return super.hashCode() + tablePath.hashCode();
-  }
-
-  /**
-   * To manage data file name and composition
-   */
-  public static class DataFileUtil {
-
-    /**
-     * gets updated timestamp information from given carbon data file name
-     */
-    public static String getUpdateTimeStamp(String carbonDataFileName) {
-      // Get the file name from path
-      String fileName = getFileName(carbonDataFileName);
-      // + 1 for size of "-"
-      int firstDashPos = fileName.indexOf("-");
-      int secondDashPos = fileName.indexOf("-", firstDashPos + 1);
-      int startIndex = fileName.indexOf("-", secondDashPos + 1) + 1;
-      int endIndex = fileName.indexOf(".");
-      return fileName.substring(startIndex, endIndex);
-    }
-
-    /**
-     * gets file part number information from given carbon data file name
-     */
-    public static String getPartNo(String carbonDataFileName) {
-      // Get the file name from path
-      String fileName = getFileName(carbonDataFileName);
-      // + 1 for size of "-"
-      int startIndex = fileName.indexOf("-") + 1;
-      int endIndex = fileName.indexOf("-", startIndex);
-      return fileName.substring(startIndex, endIndex);
-    }
-
-    /**
-     * gets task number information from given carbon data file name
-     */
-    public static String getTaskNo(String carbonDataFileName) {
-      // Get the file name from path
-      String fileName = getFileName(carbonDataFileName);
-      // + 1 for size of "-"
-      int firstDashPos = fileName.indexOf("-");
-      int startIndex = fileName.indexOf("-", firstDashPos + 1) + 1;
-      int endIndex = fileName.indexOf("-", startIndex);
-      return fileName.substring(startIndex, endIndex);
-    }
-
-    /**
-     * Gets the file name from file path
-     */
-    private static String getFileName(String carbonDataFileName) {
-      int endIndex = carbonDataFileName.lastIndexOf(CarbonCommonConstants.FILE_SEPARATOR);
-      if (endIndex > -1) {
-        return carbonDataFileName.substring(endIndex + 1, carbonDataFileName.length());
-      } else {
-        return carbonDataFileName;
-      }
-    }
-  }
-
-  /**
-   * To manage data path and composition
-   */
-  public static class DataPathUtil {
-
-    /**
-     * gets segment id from given absolute data file path
-     */
-    public static String getSegmentId(String dataFileAbsolutePath) {
-      // find segment id from last of data file path
-      int endIndex = dataFileAbsolutePath.lastIndexOf(CarbonCommonConstants.FILE_SEPARATOR);
-      // + 1 for size of "/"
-      int startIndex =
-          dataFileAbsolutePath.lastIndexOf(CarbonCommonConstants.FILE_SEPARATOR, endIndex - 1) + 1;
-      String segmentDirStr = dataFileAbsolutePath.substring(startIndex, endIndex);
-      //identify id in segment_<id>
-      String[] segmentDirSplits = segmentDirStr.split("_");
-      try {
-        if (segmentDirSplits.length == 2) {
-          return segmentDirSplits[1];
-        }
-      } catch (Exception e) {
-        return INVALID_SEGMENT_ID;
-      }
-      return INVALID_SEGMENT_ID;
-    }
-  }
-
-  /**
-   * Below method will be used to get sort index file present in mentioned folder
-   *
-   * @param sortIndexDir directory where sort index file resides
-   * @param columnUniqueId   column unique id
-   * @return sort index carbon files
-   */
-  public CarbonFile[] getSortIndexFiles(CarbonFile sortIndexDir, final String columnUniqueId) {
-    CarbonFile[] files = sortIndexDir.listFiles(new CarbonFileFilter() {
-      @Override public boolean accept(CarbonFile file) {
-        return file.getName().startsWith(columnUniqueId) && file.getName().endsWith(SORT_INDEX_EXT);
-      }
-    });
-    return files;
-  }
-}
\ No newline at end of file
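
DataFileUtil slices a data file name of the form part-<filePartNo>-<taskNo>-<factUpdateTimeStamp>.carbondata on its dashes, and DataPathUtil pulls the segment id out of the Segment_<id> directory. A walk-through with made-up values, assuming CarbonCommonConstants.FILE_SEPARATOR is "/":

import org.carbondata.core.carbon.path.CarbonTablePath;

public class DataFileNameExample {
  public static void main(String[] args) {
    String fileName = "part-0-1-1408014283000.carbondata"; // hypothetical name
    System.out.println(CarbonTablePath.DataFileUtil.getPartNo(fileName));          // 0
    System.out.println(CarbonTablePath.DataFileUtil.getTaskNo(fileName));          // 1
    System.out.println(CarbonTablePath.DataFileUtil.getUpdateTimeStamp(fileName)); // 1408014283000

    // segment id comes from the Segment_<id> directory in the path
    String dataFilePath =
        "/opt/carbonStore/default/sales/Fact/Part0/Segment_2/" + fileName;
    System.out.println(CarbonTablePath.DataPathUtil.getSegmentId(dataFilePath));   // 2
  }
}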

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/querystatistics/QueryStatistic.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/querystatistics/QueryStatistic.java b/core/src/main/java/org/carbondata/core/carbon/querystatistics/QueryStatistic.java
deleted file mode 100644
index 80398c1..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/querystatistics/QueryStatistic.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.querystatistics;
-
-import java.io.Serializable;
-
-/**
- * Wrapper class to maintain the query statistics for each phase of the query
- */
-public class QueryStatistic implements Serializable {
-
-  /**
-   * serialization id
-   */
-  private static final long serialVersionUID = -5667106646135905848L;
-
-  /**
-   * statistic message
-   */
-  private String message;
-
-  /**
-   * total time taken by the phase
-   */
-  private long timeTaken;
-
-  /**
-   * start time of the phase
-   */
-  private long startTime;
-
-  public QueryStatistic() {
-    this.startTime = System.currentTimeMillis();
-  }
-
-  /**
-   * below method will be used to add the statistic
-   *
-   * @param message     Statistic message
-   * @param currentTime current time
-   */
-  public void addStatistics(String message, long currentTime) {
-    this.timeTaken = currentTime - startTime;
-    this.message = message;
-  }
-
-  /**
-   * Below method will be used to add fixed time statistic.
-   * For example total time taken for scan or result preparation
-   *
-   * @param message   statistic message
-   * @param timetaken time taken by the phase
-   */
-  public void addFixedTimeStatistic(String message, long timetaken) {
-    this.timeTaken = timetaken;
-    this.message = message;
-  }
-
-  /**
-   * Below method will be used to get the statistic message, which will
-   * be used to log
-   *
-   * @param queryWithTaskId query with task id to append in the message
-   * @return statistic message
-   */
-  public String getStatistics(String queryWithTaskId) {
-    return message + " for the taskid : " + queryWithTaskId + " Is : " + timeTaken;
-  }
-}
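
A QueryStatistic captures its start time at construction; addStatistics() then derives the elapsed time from the current time passed in. A minimal sketch with a made-up task id:

import org.carbondata.core.carbon.querystatistics.QueryStatistic;

public class QueryStatisticExample {
  public static void main(String[] args) throws InterruptedException {
    QueryStatistic scanStatistic = new QueryStatistic(); // records start time
    Thread.sleep(5);                                     // stand-in for the scan phase
    scanStatistic.addStatistics("Time taken for scan", System.currentTimeMillis());
    // prints: Time taken for scan for the taskid : query-1_0 Is : <millis>
    System.out.println(scanStatistic.getStatistics("query-1_0"));
  }
}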

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/querystatistics/QueryStatisticsRecorder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/querystatistics/QueryStatisticsRecorder.java b/core/src/main/java/org/carbondata/core/carbon/querystatistics/QueryStatisticsRecorder.java
deleted file mode 100644
index 961d744..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/querystatistics/QueryStatisticsRecorder.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.querystatistics;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-
-/**
- * Class will be used to record and log the query statistics
- */
-public class QueryStatisticsRecorder implements Serializable {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(QueryStatisticsRecorder.class.getName());
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = -5719752001674467864L;
-
-  /**
-   * list of statistics to record the time taken
-   * by each phase of the query, for example aggregation,
-   * scanning, block loading time etc.
-   */
-  private List<QueryStatistic> queryStatistics;
-
-  /**
-   * query with task id
-   */
-  private String queryIWthTask;
-
-  public QueryStatisticsRecorder(String queryId) {
-    queryStatistics = new ArrayList<QueryStatistic>();
-    this.queryIWthTask = queryId;
-  }
-
-  /**
-   * Below method will be used to add the statistics
-   *
-   * @param statistic
-   */
-  public synchronized void recordStatistics(QueryStatistic statistic) {
-    queryStatistics.add(statistic);
-  }
-
-  /**
-   * Below method will be used to log the statistic
-   */
-  public void logStatistics() {
-    for (QueryStatistic statistic : queryStatistics) {
-      LOGGER.statistic(statistic.getStatistics(queryIWthTask));
-    }
-  }
-}
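
Putting the two classes together: a recorder collects one QueryStatistic per phase and logs them all at the end. The query id is an assumption for illustration:

import org.carbondata.core.carbon.querystatistics.QueryStatistic;
import org.carbondata.core.carbon.querystatistics.QueryStatisticsRecorder;

public class RecorderExample {
  public static void main(String[] args) {
    QueryStatisticsRecorder recorder = new QueryStatisticsRecorder("query-1"); // hypothetical id
    QueryStatistic blockLoad = new QueryStatistic();
    // ... block loading work would happen here ...
    blockLoad.addStatistics("Total block loading time", System.currentTimeMillis());
    recorder.recordStatistics(blockLoad);
    recorder.logStatistics(); // each entry goes through LOGGER.statistic(...)
  }
}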

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/constants/CarbonCommonConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/carbondata/core/constants/CarbonCommonConstants.java
deleted file mode 100644
index 2da1957..0000000
--- a/core/src/main/java/org/carbondata/core/constants/CarbonCommonConstants.java
+++ /dev/null
@@ -1,892 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.constants;
-
-public final class CarbonCommonConstants {
-  /**
-   * integer size in bytes
-   */
-  public static final int INT_SIZE_IN_BYTE = 4;
-  /**
-   * short size in bytes
-   */
-  public static final int SHORT_SIZE_IN_BYTE = 2;
-  /**
-   * DOUBLE size in bytes
-   */
-  public static final int DOUBLE_SIZE_IN_BYTE = 8;
-  /**
-   * LONG size in bytes
-   */
-  public static final int LONG_SIZE_IN_BYTE = 8;
-  /**
-   * byte to KB conversion factor
-   */
-  public static final int BYTE_TO_KB_CONVERSION_FACTOR = 1024;
-  /**
-   * BYTE_ENCODING
-   */
-  public static final String BYTE_ENCODING = "ISO-8859-1";
-  /**
-   * measure meta data file name
-   */
-  public static final String MEASURE_METADATA_FILE_NAME = "/msrMetaData_";
-  /**
-   * location of the carbon member, hierarchy and fact files
-   */
-  public static final String STORE_LOCATION = "carbon.storelocation";
-  /**
-   * blocklet size in carbon file
-   */
-  public static final String BLOCKLET_SIZE = "carbon.blocklet.size";
-  /**
-   * TODO: max number of blocklets written in a single file?
-   */
-  public static final String MAX_FILE_SIZE = "carbon.max.file.size";
-  /**
-   * Number of cores to be used
-   */
-  public static final String NUM_CORES = "carbon.number.of.cores";
-  /**
-   * carbon sort size
-   */
-  public static final String SORT_SIZE = "carbon.sort.size";
-  /**
-   * default location of the carbon member, hierarchy and fact files
-   */
-  public static final String STORE_LOCATION_DEFAULT_VAL = "../carbon.store";
-  /**
-   * the folder name of kettle home path
-   */
-  public static final String KETTLE_HOME_NAME = "carbonplugins";
-  /**
-   * CARDINALITY_INCREMENT_DEFAULT_VALUE
-   */
-  public static final int CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL = 10;
-  /**
-   * default blocklet size
-   */
-  public static final String BLOCKLET_SIZE_DEFAULT_VAL = "120000";
-  /**
-   * min blocklet size
-   */
-  public static final int BLOCKLET_SIZE_MIN_VAL = 50;
-  /**
-   * max blocklet size
-   */
-  public static final int BLOCKLET_SIZE_MAX_VAL = 12000000;
-  /**
-   * TODO: default value of max number of blocklet written in a single file?
-   */
-  public static final String MAX_FILE_SIZE_DEFAULT_VAL = "1024";
-  /**
-   * TODO: min value of max number of blocklets written in a single file?
-   */
-  public static final int MAX_FILE_SIZE_DEFAULT_VAL_MIN_VAL = 1;
-  /**
-   * max allowed block size for a file. If block size is greater than this value
-   * then the value is reset to default block size for a file
-   */
-  public static final int MAX_FILE_SIZE_DEFAULT_VAL_MAX_VAL = 2048;
-  /**
-   * default value of number of cores to be used
-   */
-  public static final String NUM_CORES_DEFAULT_VAL = "2";
-  /**
-   * min value of number of cores to be used
-   */
-  public static final int NUM_CORES_MIN_VAL = 1;
-  /**
-   * max value of number of cores to be used
-   */
-  public static final int NUM_CORES_MAX_VAL = 32;
-  /**
-   * default carbon sort size
-   */
-  public static final String SORT_SIZE_DEFAULT_VAL = "100000";
-  /**
-   * min carbon sort size
-   */
-  public static final int SORT_SIZE_MIN_VAL = 1000;
-  /**
-   * carbon properties file path
-   */
-  public static final String CARBON_PROPERTIES_FILE_PATH = "../../../conf/carbon.properties";
-  /**
-   * CARBON_DDL_BASE_HDFS_URL
-   */
-  public static final String CARBON_DDL_BASE_HDFS_URL = "carbon.ddl.base.hdfs.url";
-  /**
-   * Slice Meta data file.
-   */
-  public static final String SLICE_METADATA_FILENAME = "sliceMetaData";
-  /**
-   * Load Folder Name
-   */
-  public static final String LOAD_FOLDER = "Segment_";
-  /**
-   * RESTructure Folder
-   */
-  public static final String RESTRUCTRE_FOLDER = "RS_";
-  /**
-   * BYTEBUFFER_SIZE
-   */
-
-  public static final int BYTEBUFFER_SIZE = 24 * 1024;
-  /**
-   * Average constant
-   */
-  public static final String AVERAGE = "avg";
-  /**
-   * Count constant
-   */
-  public static final String COUNT = "count";
-  /**
-   * Count star constant
-   */
-  public static final String COUNT_STAR = "countstar";
-  /**
-   * Max constant
-   */
-  public static final String MAX = "max";
-  /**
-   * Min constant
-   */
-  public static final String MIN = "min";
-  /**
-   * distinct count
-   */
-  public static final String DISTINCT_COUNT = "distinct-count";
-  /**
-   * CUSTOM
-   */
-  public static final String CUSTOM = "custom";
-  /**
-   * SUM
-   */
-  public static final String SUM = "sum";
-  /**
-   * DUMMY aggregation function
-   */
-  public static final String DUMMY = "dummy";
-  /**
-   * MEMBER_DEFAULT_VAL
-   */
-  public static final String MEMBER_DEFAULT_VAL = "@NU#LL$!";
-  /**
-   * BLANK_LINE_FLAG
-   */
-  public static final String BLANK_LINE_FLAG = "@NU#LL$!BLANKLINE";
-  /**
-   * FILE STATUS IN-PROGRESS
-   */
-  public static final String FILE_INPROGRESS_STATUS = ".inprogress";
-  /**
-   * CARBON_BADRECORDS_LOCATION
-   */
-  public static final String CARBON_BADRECORDS_LOC = "carbon.badRecords.location";
-  /**
-   * CARBON_BADRECORDS_LOCATION_DEFAULT
-   */
-  public static final String CARBON_BADRECORDS_LOC_DEFAULT_VAL =
-      "../unibi-solutions/system/carbon/badRecords";
-  /**
-   * HIERARCHY_FILE_EXTENSION
-   */
-  public static final String HIERARCHY_FILE_EXTENSION = ".hierarchy";
-  /**
-   * SORT_TEMP_FILE_LOCATION
-   */
-  public static final String SORT_TEMP_FILE_LOCATION = "sortrowtmp";
-  /**
-   * LEVEL_FILE_EXTENSION
-   */
-  public static final String LEVEL_FILE_EXTENSION = ".level";
-  /**
-   * FACT_FILE_EXT
-   */
-  public static final String FACT_FILE_EXT = ".carbondata";
-  /**
-   * MEASUREMETADATA_FILE_EXT
-   */
-  public static final String MEASUREMETADATA_FILE_EXT = ".msrmetadata";
-  /**
-   * GRAPH_ROWSET_SIZE
-   */
-  public static final String GRAPH_ROWSET_SIZE = "carbon.graph.rowset.size";
-  /**
-   * GRAPH_ROWSET_SIZE_DEFAULT
-   */
-  public static final String GRAPH_ROWSET_SIZE_DEFAULT = "500";
-  /**
-   * Comment for <code>TYPE_MYSQL</code>
-   */
-  public static final String TYPE_MYSQL = "MYSQL";
-  /**
-   * Comment for <code>TYPE_MSSQL</code>
-   */
-  public static final String TYPE_MSSQL = "MSSQL";
-  /**
-   * Comment for <code>TYPE_ORACLE</code>
-   */
-  public static final String TYPE_ORACLE = "ORACLE";
-  /**
-   * Comment for <code>TYPE_SYBASE</code>
-   */
-  public static final String TYPE_SYBASE = "SYBASE";
-  /**
-   * SORT_INTERMEDIATE_FILES_LIMIT
-   */
-  public static final String SORT_INTERMEDIATE_FILES_LIMIT = "carbon.sort.intermediate.files.limit";
-  /**
-   * SORT_INTERMEDIATE_FILES_LIMIT_DEFAULT_VALUE
-   */
-  public static final String SORT_INTERMEDIATE_FILES_LIMIT_DEFAULT_VALUE = "20";
-  /**
-   * MERGERD_EXTENSION
-   */
-  public static final String MERGERD_EXTENSION = ".merge";
-  /**
-   * SORT_FILE_BUFFER_SIZE
-   */
-  public static final String SORT_FILE_BUFFER_SIZE = "carbon.sort.file.buffer.size";
-  /**
-   * no.of records after which counter to be printed
-   */
-  public static final String DATA_LOAD_LOG_COUNTER = "carbon.load.log.counter";
-  /**
-   * DATA_LOAD_LOG_COUNTER_DEFAULT_COUNTER
-   */
-  public static final String DATA_LOAD_LOG_COUNTER_DEFAULT_COUNTER = "500000";
-  /**
-   * SORT_FILE_WRITE_BUFFER_SIZE
-   */
-  public static final String CARBON_SORT_FILE_WRITE_BUFFER_SIZE =
-      "carbon.sort.file.write.buffer.size";
-  /**
-   * SORT_FILE_WRITE_BUFFER_SIZE_DEFAULT_VALUE
-   */
-  public static final String CARBON_SORT_FILE_WRITE_BUFFER_SIZE_DEFAULT_VALUE = "50000";
-  /**
-   * Number of cores to be used while loading
-   */
-  public static final String NUM_CORES_LOADING = "carbon.number.of.cores.while.loading";
-  /**
-   * Number of cores to be used while compacting
-   */
-  public static final String NUM_CORES_COMPACTING = "carbon.number.of.cores.while.compacting";
-  /**
-   * Number of cores to be used for block sort
-   */
-  public static final String NUM_CORES_BLOCK_SORT = "carbon.number.of.cores.block.sort";
-  /**
-   * Default value of number of cores to be used for block sort
-   */
-  public static final String NUM_CORES_BLOCK_SORT_DEFAULT_VAL = "7";
-  /**
-   * Max value of number of cores to be used for block sort
-   */
-  public static final int NUM_CORES_BLOCK_SORT_MAX_VAL = 12;
-  /**
-   * Min value of number of cores to be used for block sort
-   */
-  public static final int NUM_CORES_BLOCK_SORT_MIN_VAL = 1;
-  /**
-   * CSV_READ_BUFFER_SIZE
-   */
-  public static final String CSV_READ_BUFFER_SIZE = "carbon.csv.read.buffersize.byte";
-  /**
-   * CSV_READ_BUFFER_SIZE_DEFAULT
-   */
-  public static final String CSV_READ_BUFFER_SIZE_DEFAULT = "50000";
-  /**
-   * DEFAULT_NUMBER_CORES
-   */
-  public static final String DEFAULT_NUMBER_CORES = "2";
-  /**
-   * CSV_FILE_EXTENSION
-   */
-  public static final String CSV_FILE_EXTENSION = ".csv";
-  /**
-   * COLON_SPC_CHARACTER
-   */
-  public static final String COLON_SPC_CHARACTER = ":!@#COLON#@!:";
-  /**
-   * HASH_SPC_CHARACTER
-   */
-  public static final String HASH_SPC_CHARACTER = "#!@:HASH:@!#";
-  /**
-   * SEMICOLON_SPC_CHARACTER
-   */
-  public static final String SEMICOLON_SPC_CHARACTER = ";#!@:SEMIC:@!#;";
-  /**
-   * AMPERSAND_SPC_CHARACTER
-   */
-  public static final String AMPERSAND_SPC_CHARACTER = "&#!@:AMPER:@!#&";
-  /**
-   * COMA_SPC_CHARACTER
-   */
-  public static final String COMA_SPC_CHARACTER = ",#!:COMA:!#,";
-  /**
-   * HYPHEN_SPC_CHARACTER
-   */
-  public static final String HYPHEN_SPC_CHARACTER = "-#!:HYPHEN:!#-";
-  /**
-   * CARBON_DECIMAL_POINTERS_DEFAULT
-   */
-  public static final byte CARBON_DECIMAL_POINTERS_DEFAULT = 5;
-  /**
-   * SORT_TEMP_FILE_EXT
-   */
-  public static final String SORT_TEMP_FILE_EXT = ".sorttemp";
-  /**
-   * CARBON_MERGE_SORT_READER_THREAD
-   */
-  public static final String CARBON_MERGE_SORT_READER_THREAD = "carbon.merge.sort.reader.thread";
-  /**
-   * CARBON_MERGE_SORT_READER_THREAD_DEFAULTVALUE
-   */
-  public static final String CARBON_MERGE_SORT_READER_THREAD_DEFAULTVALUE = "3";
-  /**
-   * IS_SORT_TEMP_FILE_COMPRESSION_ENABLED
-   */
-  public static final String IS_SORT_TEMP_FILE_COMPRESSION_ENABLED =
-      "carbon.is.sort.temp.file.compression.enabled";
-  /**
-   * IS_SORT_TEMP_FILE_COMPRESSION_ENABLED_DEFAULTVALUE
-   */
-  public static final String IS_SORT_TEMP_FILE_COMPRESSION_ENABLED_DEFAULTVALUE = "false";
-  /**
-   * SORT_TEMP_FILE_NO_OF_RECORDS_FOR_COMPRESSION
-   */
-  public static final String SORT_TEMP_FILE_NO_OF_RECORDS_FOR_COMPRESSION =
-      "carbon.sort.temp.file.no.of.records.for.compression";
-  /**
-   * SORT_TEMP_FILE_NO_OF_RECORD_FOR_COMPRESSION_DEFAULTVALUE
-   */
-  public static final String SORT_TEMP_FILE_NO_OF_RECORD_FOR_COMPRESSION_DEFAULTVALUE = "50";
-  /**
-   * DEFAULT_COLLECTION_SIZE
-   */
-  public static final int DEFAULT_COLLECTION_SIZE = 16;
-  /**
-   * CARBON_TIMESTAMP_DEFAULT_FORMAT
-   */
-  public static final String CARBON_TIMESTAMP_DEFAULT_FORMAT = "yyyy-MM-dd HH:mm:ss";
-  /**
-   * CARBON_TIMESTAMP_FORMAT
-   */
-  public static final String CARBON_TIMESTAMP_FORMAT = "carbon.timestamp.format";
-  /**
-   * STORE_LOCATION_HDFS
-   */
-  public static final String STORE_LOCATION_HDFS = "carbon.storelocation.hdfs";
-  /**
-   * STORE_LOCATION_TEMP_PATH
-   */
-  public static final String STORE_LOCATION_TEMP_PATH = "carbon.tempstore.location";
-  /**
-   * IS_COLUMNAR_STORAGE_DEFAULTVALUE
-   */
-  public static final String IS_COLUMNAR_STORAGE_DEFAULTVALUE = "true";
-  /**
-   * DIMENSION_SPLIT_VALUE_IN_COLUMNAR_DEFAULTVALUE
-   */
-  public static final String DIMENSION_SPLIT_VALUE_IN_COLUMNAR_DEFAULTVALUE = "1";
-  /**
-   * IS_FULLY_FILLED_BITS_DEFAULT_VALUE
-   */
-  public static final String IS_FULLY_FILLED_BITS_DEFAULT_VALUE = "true";
-  /**
-   * AGGREAGATE_COLUMNAR_KEY_BLOCK
-   */
-  public static final String AGGREAGATE_COLUMNAR_KEY_BLOCK = "aggregate.columnar.keyblock";
-  /**
-   * AGGREAGATE_COLUMNAR_KEY_BLOCK_DEFAULTVALUE
-   */
-  public static final String AGGREAGATE_COLUMNAR_KEY_BLOCK_DEFAULTVALUE = "true";
-  /**
-   * ENABLE_DATA_LOADING_STATISTICS
-   */
-  public static final String ENABLE_DATA_LOADING_STATISTICS = "enable.data.loading.statistics";
-  /**
-   * ENABLE_DATA_LOADING_STATISTICS_DEFAULT
-   */
-  public static final String ENABLE_DATA_LOADING_STATISTICS_DEFAULT = "false";
-  /**
-   * HIGH_CARDINALITY_VALUE
-   */
-  public static final String HIGH_CARDINALITY_VALUE = "high.cardinality.value";
-  /**
-   * HIGH_CARDINALITY_VALUE_DEFAULTVALUE
-   */
-  public static final String HIGH_CARDINALITY_VALUE_DEFAULTVALUE = "100000";
-  /**
-   * CONSTANT_SIZE_TEN
-   */
-  public static final int CONSTANT_SIZE_TEN = 10;
-  /**
-   * LEVEL_METADATA_FILE
-   */
-  public static final String LEVEL_METADATA_FILE = "levelmetadata_";
-  public static final String ENABLE_BASE64_ENCODING = "enable.base64.encoding";
-  public static final String ENABLE_BASE64_ENCODING_DEFAULT = "false";
-  /**
-   * LOAD_STATUS SUCCESS
-   */
-  public static final String STORE_LOADSTATUS_SUCCESS = "Success";
-  /**
-   * LOAD_STATUS FAILURE
-   */
-  public static final String STORE_LOADSTATUS_FAILURE = "Failure";
-  /**
-   * LOAD_STATUS PARTIAL_SUCCESS
-   */
-  public static final String STORE_LOADSTATUS_PARTIAL_SUCCESS = "Partial Success";
-  /**
-   * CARBON_METADATA_EXTENSION
-   */
-  public static final String CARBON_METADATA_EXTENSION = ".metadata";
-  /**
-   * CARBON_DEFAULT_STREAM_ENCODEFORMAT
-   */
-  public static final String CARBON_DEFAULT_STREAM_ENCODEFORMAT = "UTF-8";
-  /**
-   * AGGREGATE_TABLE_START_TAG
-   */
-  public static final String AGGREGATE_TABLE_START_TAG = "agg";
-  /**
-   * COMMA
-   */
-  public static final String COMMA = ",";
-  /**
-   * UNDERSCORE
-   */
-  public static final String UNDERSCORE = "_";
-  /**
-   * POINT
-   */
-  public static final String POINT = ".";
-  /**
-   * File separator
-   */
-  public static final String FILE_SEPARATOR = "/";
-  /**
-   * MAX_QUERY_EXECUTION_TIME
-   */
-  public static final String MAX_QUERY_EXECUTION_TIME = "max.query.execution.time";
-  /**
-   * CARBON_TIMESTAMP
-   */
-  public static final String CARBON_TIMESTAMP = "dd-MM-yyyy HH:mm:ss";
-  /**
-   * METADATA_LOCK
-   */
-  public static final String METADATA_LOCK = "meta.lock";
-  /**
-   * NUMBER_OF_TRIES_FOR_LOAD_METADATA_LOCK_DEFAULT
-   */
-  public static final int NUMBER_OF_TRIES_FOR_LOAD_METADATA_LOCK_DEFAULT = 3;
-  /**
-   * MAX_TIMEOUT_FOR_LOAD_METADATA_LOCK_DEFAULT
-   */
-  public static final int MAX_TIMEOUT_FOR_LOAD_METADATA_LOCK_DEFAULT = 5;
-  /**
-   * NUMBER_OF_TRIES_FOR_LOAD_METADATA_LOCK
-   */
-  public static final String NUMBER_OF_TRIES_FOR_LOAD_METADATA_LOCK =
-      "carbon.load.metadata.lock.retries";
-  /**
-   * MAX_TIMEOUT_FOR_LOAD_METADATA_LOCK
-   */
-  public static final String MAX_TIMEOUT_FOR_LOAD_METADATA_LOCK =
-      "carbon.load.metadata.lock.retry.timeout.sec";
-  /**
-   * MARKED_FOR_DELETION
-   */
-  public static final String MARKED_FOR_DELETE = "Marked for Delete";
-  public static final String MARKED_FOR_UPDATE = "Marked for Update";
-  public static final String STRING_TYPE = "StringType";
-  public static final String INTEGER_TYPE = "IntegerType";
-  public static final String LONG_TYPE = "LongType";
-  public static final String DOUBLE_TYPE = "DoubleType";
-  public static final String FLOAT_TYPE = "FloatType";
-  public static final String DATE_TYPE = "DateType";
-  public static final String BOOLEAN_TYPE = "BooleanType";
-  public static final String TIMESTAMP_TYPE = "TimestampType";
-  public static final String BYTE_TYPE = "ByteType";
-  public static final String SHORT_TYPE = "ShortType";
-  public static final String BINARY_TYPE = "BinaryType";
-  public static final String DECIMAL_TYPE = "DecimalType";
-  public static final String STRING = "String";
-  public static final String COLUMNAR = "columnar";
-
-  public static final String INTEGER = "Integer";
-  public static final String SHORT = "Short";
-  public static final String NUMERIC = "Numeric";
-  public static final String TIMESTAMP = "Timestamp";
-  public static final String ARRAY = "ARRAY";
-  public static final String STRUCT = "STRUCT";
-  public static final String INCLUDE = "include";
-  public static final String FROM = "from";
-  public static final String WITH = "with";
-  /**
-   * FACT_UPDATE_EXTENSION.
-   */
-  public static final String FACT_UPDATE_EXTENSION = ".carbondata_update";
-  public static final String FACT_DELETE_EXTENSION = "_delete";
-  /**
-   * FACT_FILE_UPDATED
-   */
-  public static final String FACT_FILE_UPDATED = "update";
-  /**
-   * DEFAULT_MAX_QUERY_EXECUTION_TIME
-   */
-  public static final int DEFAULT_MAX_QUERY_EXECUTION_TIME = 60;
-  /**
-   * LOADMETADATA_FILENAME
-   */
-  public static final String LOADMETADATA_FILENAME = "tablestatus";
-  public static final String SUM_DISTINCT = "sum-distinct";
-  /**
-   * INMEMORY_REOCRD_SIZE
-   */
-  public static final String INMEMORY_REOCRD_SIZE = "carbon.inmemory.record.size";
-  public static final int INMEMORY_REOCRD_SIZE_DEFAULT = 240000;
-
-  /**
-   * DETAIL_QUERY_BATCH_SIZE
-   */
-  public static final String DETAIL_QUERY_BATCH_SIZE = "carbon.detail.batch.size";
-  public static final int DETAIL_QUERY_BATCH_SIZE_DEFAULT = 10000;
-  /**
-   * SCHEMAS_MODIFIED_TIME_FILE
-   */
-  public static final String SCHEMAS_MODIFIED_TIME_FILE = "modifiedTime.mdt";
-  public static final String DEFAULT_INVISIBLE_DUMMY_MEASURE = "default_dummy_measure";
-  /**
-   * max level cache size up to which the level cache will be loaded in memory
-   */
-  public static final String CARBON_MAX_LEVEL_CACHE_SIZE = "carbon.max.level.cache.size";
-  /**
-   * max level cache size default value in GB
-   */
-  public static final String CARBON_MAX_LEVEL_CACHE_SIZE_DEFAULT = "-1";
-  /**
-   * SUM_COUNT_VALUE_MEASURE
-   */
-  public static final char SUM_COUNT_VALUE_MEASURE = 'n';
-  /**
-   * BYTE_VALUE_MEASURE
-   */
-  public static final char BYTE_VALUE_MEASURE = 'c';
-  /**
-   * BIG_DECIMAL_MEASURE
-   */
-  public static final char BIG_DECIMAL_MEASURE = 'b';
-
-  /**
-   * BIG_INT_MEASURE
-   */
-  public static final char BIG_INT_MEASURE = 'l';
-
-  /**
-   * This determines the size of the array to be processed in data load steps: one
-   * for dimensions, one for ignore-dictionary dimensions, one for measures.
-   */
-  public static final int ARRAYSIZE = 3;
-  /**
-   * CARBON_PREFETCH_BUFFERSIZE
-   */
-  public static final int CARBON_PREFETCH_BUFFERSIZE = 20000;
-  /**
-   * CARBON_PREFETCH_IN_MERGE
-   */
-  public static final boolean CARBON_PREFETCH_IN_MERGE_VALUE = false;
-  /**
-   * TEMPWRITEFILEEXTENSION
-   */
-  public static final String TEMPWRITEFILEEXTENSION = ".write";
-  /**
-   * ENABLE_AUTO_LOAD_MERGE
-   */
-  public static final String ENABLE_AUTO_LOAD_MERGE = "carbon.enable.auto.load.merge";
-  /**
-   * DEFAULT_ENABLE_AUTO_LOAD_MERGE
-   */
-  public static final String DEFAULT_ENABLE_AUTO_LOAD_MERGE = "false";
-
-  /**
-   * LOCK_TYPE this property configures the type of lock (for example local or
-   * zookeeper based) used for carbon's locking mechanism
-   */
-  public static final String LOCK_TYPE = "carbon.lock.type";
-
-  /**
-   * LOCK_TYPE_DEFAULT the default lock type used by carbon is the local lock
-   */
-  public static final String LOCK_TYPE_DEFAULT = "LOCALLOCK";
-
-  /**
-   * ZOOKEEPER_LOCATION this is the location in the zookeeper file system where locks are created
-   */
-  public static final String ZOOKEEPER_LOCATION = "/CarbonLocks";
-
-  /**
-   * maximum dictionary chunk size that can be kept in memory while writing dictionary file
-   */
-  public static final String DICTIONARY_ONE_CHUNK_SIZE = "carbon.dictionary.chunk.size";
-
-  /**
-   * dictionary chunk default size
-   */
-  public static final String DICTIONARY_ONE_CHUNK_SIZE_DEFAULT = "10000";
-
-  /**
-   * xxhash algorithm property for hashmap
-   */
-  public static final String ENABLE_XXHASH = "carbon.enableXXHash";
-
-  /**
-   * xxhash algorithm property for hashmap. Default value true
-   */
-  public static final String ENABLE_XXHASH_DEFAULT = "true";
-
-  /**
-   * default charset to be used for reading and writing
-   */
-  public static final String DEFAULT_CHARSET = "UTF-8";
-
-  /**
-   * surrogate key that will be returned whenever a valid surrogate key is not
-   * found in the dictionary chunks for a given dictionary value
-   */
-  public static final int INVALID_SURROGATE_KEY = -1;
-
-  /**
-   * surrogate key for MEMBER_DEFAULT_VAL
-   */
-  public static final int MEMBER_DEFAULT_VAL_SURROGATE_KEY = 1;
-
-  public static final String INVALID_SEGMENT_ID = "-1";
-
-  /**
-   * Size of Major Compaction in MBs
-   */
-  public static final String MAJOR_COMPACTION_SIZE = "carbon.major.compaction.size";
-
-  /**
-   * By default size of major compaction in MBs.
-   */
-  public static final String DEFAULT_MAJOR_COMPACTION_SIZE = "1024";
-
-  /**
-   * This property is used to tell how many segments to be preserved from merging.
-   */
-  public static final String PRESERVE_LATEST_SEGMENTS_NUMBER =
-      "carbon.numberof.preserve.segments";
-
-  /**
-   * By default no segments are preserved from compaction.
-   */
-  public static final String DEFAULT_PRESERVE_LATEST_SEGMENTS_NUMBER = "0";
-
-  /**
-   * This property determines how many days' worth of loads can be compacted.
-   */
-  public static final String DAYS_ALLOWED_TO_COMPACT = "carbon.allowed.compaction.days";
-
-  /**
-   * Default value; 0 means loads of any age are allowed to be compacted
-   */
-  public static final String DEFAULT_DAYS_ALLOWED_TO_COMPACT = "0";
-
-  /**
-   * space reserved for writing block meta data in carbon data file
-   */
-  public static final String CARBON_BLOCK_META_RESERVED_SPACE =
-      "carbon.block.meta.size.reserved.percentage";
-
-  /**
-   * default value for space reserved for writing block meta data in carbon data file
-   */
-  public static final String CARBON_BLOCK_META_RESERVED_SPACE_DEFAULT = "10";
-
-  /**
-   * property to enable min max during filter query
-   */
-  public static final String CARBON_QUERY_MIN_MAX_ENABLED = "carbon.enableMinMax";
-
-  /**
-   * default value to enable min or max during filter query execution
-   */
-  public static final String MIN_MAX_DEFAULT_VALUE = "true";
-
-  /**
-   * this variable is to enable/disable prefetch of data during merge sort while
-   * reading data from sort temp files
-   */
-  public static final String CARBON_MERGE_SORT_PREFETCH = "carbon.merge.sort.prefetch";
-  public static final String CARBON_MERGE_SORT_PREFETCH_DEFAULT = "true";
-
-  /**
-   * default name of the database
-   */
-  public static final String DATABASE_DEFAULT_NAME = "default";
-
-  // tblproperties
-  public static final String COLUMN_GROUPS = "column_groups";
-  public static final String DICTIONARY_EXCLUDE = "dictionary_exclude";
-  public static final String DICTIONARY_INCLUDE = "dictionary_include";
-  public static final String PARTITIONCLASS = "partitionclass";
-  public static final String PARTITIONCOUNT = "partitioncount";
-  public static final String COLUMN_PROPERTIES = "columnproperties";
-
-  /**
-   * this variable is to enable/disable identifying high cardinality columns during the first data load
-   */
-  public static final String HIGH_CARDINALITY_IDENTIFY_ENABLE =
-      "high.cardinality.identify.enable";
-  public static final String HIGH_CARDINALITY_IDENTIFY_ENABLE_DEFAULT = "true";
-
-  /**
-   * threshold of high cardinality
-   */
-  public static final String HIGH_CARDINALITY_THRESHOLD = "high.cardinality.threshold";
-  public static final String HIGH_CARDINALITY_THRESHOLD_DEFAULT = "1000000";
-  public static final int HIGH_CARDINALITY_THRESHOLD_MIN = 10000;
-
-  /**
-   * percentage of cardinality in row count
-   */
-  public static final String HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE =
-      "high.cardinality.row.count.percentage";
-  public static final String HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE_DEFAULT = "80";
-
-  /**
-   * 16 mb size
-   */
-  public static final long CARBON_16MB = 16*1024*1024;
-  /**
-   * 256 mb size
-   */
-  public static final long CARBON_256MB = 256*1024*1024;
-
-  /**
-   * Data type String.
-   */
-  public static final String DATATYPE_STRING = "STRING";
-
-  /**
-   * SEGMENT_COMPACTED is the status used to indicate whether a segment is compacted or not.
-   */
-  public static final String SEGMENT_COMPACTED = "Compacted";
-
-  /**
-   * property for the number of cores used to load the blocks in the driver
-   */
-  public static final String NUMBER_OF_CORE_TO_LOAD_DRIVER_SEGMENT =
-      "no.of.cores.to.load.blocks.in.driver";
-  /**
-   * default number of cores
-   */
-  public static final int NUMBER_OF_CORE_TO_LOAD_DRIVER_SEGMENT_DEFAULT_VALUE = 10;
-
-  /**
-   * ZOOKEEPERLOCK TYPE
-   */
-  public static final String CARBON_LOCK_TYPE_ZOOKEEPER =
-      "ZOOKEEPERLOCK";
-
-  /**
-   * LOCALLOCK TYPE
-   */
-  public static final String CARBON_LOCK_TYPE_LOCAL =
-      "LOCALLOCK";
-
-  /**
-   * HDFSLOCK TYPE
-   */
-  public static final String CARBON_LOCK_TYPE_HDFS =
-      "HDFSLOCK";
-
-  /**
-   * Lock file in zookeeper will be of this name.
-   */
-  public static final String ZOOKEEPER_LOCK = "zookeeperLock";
-
-  /**
-   * Invalid filter member log string
-   */
-  public static final String FILTER_INVALID_MEMBER = " Invalid Record(s) are present "
-                                                     + "while filter evaluation. ";
-
-  /**
-   * Number of unmerged segments to be merged.
-   */
-  public static final String COMPACTION_SEGMENT_LEVEL_THRESHOLD =
-      "carbon.compaction.level.threshold";
-
-  /**
-   * Default number of segments to be merged at each level: 4,3
-   */
-  public static final String DEFAULT_SEGMENT_LEVEL_THRESHOLD = "4,3";
-
-  /**
-   * default location of the carbon metastore db
-   */
-  public static final String METASTORE_LOCATION_DEFAULT_VAL = "../carbon.metastore";
-
-  /**
-   * hive connection url
-   */
-  public static final String HIVE_CONNECTION_URL = "javax.jdo.option.ConnectionURL";
-
-  /**
-   * Record size in case of compaction.
-   */
-  public static final int COMPACTION_INMEMORY_RECORD_SIZE = 120000;
-
-  /**
-   * If level 2 compaction is done in minor compaction then the new compacted segment will end with .2
-   */
-  public static String LEVEL2_COMPACTION_INDEX = ".2";
-
-  /**
-   * Indicates compaction
-   */
-  public static String COMPACTION_KEY_WORD = "COMPACTION";
-
-  /**
-   * hdfs temporary directory key
-   */
-  public static final String HDFS_TEMP_LOCATION = "hadoop.tmp.dir";
-
-  /**
-   * zookeeper url key
-   */
-  public static final String ZOOKEEPER_URL = "spark.deploy.zookeeper.url";
-
-  private CarbonCommonConstants() {
-  }
-}
-
-

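For reference, several of the numeric constants above form key/default/min/max groups (the block-sort core count is one example). The sketch below shows how such a group is typically resolved and clamped; it is illustrative only, reuses the constant values shown above, and does not reproduce the actual CarbonProperties logic:

    import java.util.Properties;

    public final class BlockSortCoresExample {
      // Values copied from the deleted CarbonCommonConstants class above.
      static final String NUM_CORES_BLOCK_SORT = "carbon.number.of.cores.block.sort";
      static final String NUM_CORES_BLOCK_SORT_DEFAULT_VAL = "7";
      static final int NUM_CORES_BLOCK_SORT_MAX_VAL = 12;
      static final int NUM_CORES_BLOCK_SORT_MIN_VAL = 1;

      /** Resolve the configured core count, falling back to the default and clamping to range. */
      static int resolveBlockSortCores(Properties props) {
        String raw = props.getProperty(NUM_CORES_BLOCK_SORT, NUM_CORES_BLOCK_SORT_DEFAULT_VAL);
        int cores;
        try {
          cores = Integer.parseInt(raw);
        } catch (NumberFormatException e) {
          cores = Integer.parseInt(NUM_CORES_BLOCK_SORT_DEFAULT_VAL);
        }
        return Math.max(NUM_CORES_BLOCK_SORT_MIN_VAL,
            Math.min(cores, NUM_CORES_BLOCK_SORT_MAX_VAL));
      }

      public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty(NUM_CORES_BLOCK_SORT, "20"); // above the max, so it gets clamped
        System.out.println(resolveBlockSortCores(props)); // prints 12
      }
    }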
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/constants/IgnoreDictionary.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/constants/IgnoreDictionary.java b/core/src/main/java/org/carbondata/core/constants/IgnoreDictionary.java
deleted file mode 100644
index 9d00dea..0000000
--- a/core/src/main/java/org/carbondata/core/constants/IgnoreDictionary.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.constants;
-
-/**
- * This enum is used for determining the indexes of the
- * dimension,ignoreDictionary,measure columns.
- */
-public enum IgnoreDictionary {
-  /**
-   * POSITION WHERE DIMENSIONS ARE STORED IN THE OBJECT ARRAY.
-   */
-  DIMENSION_INDEX_IN_ROW(0),
-
-  /**
-   * POSITION WHERE BYTE[] (high cardinality) IS STORED IN OBJECT ARRAY.
-   */
-  BYTE_ARRAY_INDEX_IN_ROW(1),
-
-  /**
-   * POSITION WHERE MEASURES ARE STORED IN THE OBJECT ARRAY.
-   */
-  MEASURES_INDEX_IN_ROW(2);
-
-  private final int index;
-
-  IgnoreDictionary(int index) {
-    this.index = index;
-  }
-
-  public int getIndex() {
-    return this.index;
-  }
-
-}

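The enum above pins down the layout of the three-slot Object[] row used during data load (compare ARRAYSIZE = 3 in CarbonCommonConstants: one slot for dictionary dimensions, one for no-dictionary byte arrays, one for measures). A hypothetical sketch of packing and unpacking such a row through the enum indexes; the row contents are invented for illustration and the IgnoreDictionary enum above is assumed to be on the classpath:

    public final class RowLayoutExample {
      public static void main(String[] args) {
        Object[] row = new Object[3];
        row[IgnoreDictionary.DIMENSION_INDEX_IN_ROW.getIndex()] = new int[] { 1, 5 };         // surrogates
        row[IgnoreDictionary.BYTE_ARRAY_INDEX_IN_ROW.getIndex()] = new byte[][] { { 0x41 } }; // raw values
        row[IgnoreDictionary.MEASURES_INDEX_IN_ROW.getIndex()] = new Object[] { 42.0d };      // measures

        // A consumer reads the slots back through the same indexes, so producer and
        // consumer stay in sync without magic numbers.
        int[] surrogates = (int[]) row[IgnoreDictionary.DIMENSION_INDEX_IN_ROW.getIndex()];
        byte[][] noDictValues = (byte[][]) row[IgnoreDictionary.BYTE_ARRAY_INDEX_IN_ROW.getIndex()];
        Object[] measures = (Object[]) row[IgnoreDictionary.MEASURES_INDEX_IN_ROW.getIndex()];
        System.out.println(surrogates.length + " " + noDictValues.length + " " + measures.length);
      }
    }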
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/FileHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/FileHolder.java b/core/src/main/java/org/carbondata/core/datastorage/store/FileHolder.java
deleted file mode 100644
index b6c7480..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/FileHolder.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store;
-
-
-public interface FileHolder {
-  /**
-   * This method will be used to read a byte array from the file based on the offset
-   * and the length (number of bytes) that needs to be read
-   *
-   * @param filePath fully qualified file path
-   * @param offset   reading start position,
-   * @param length   number of bytes to be read
-   * @return read byte array
-   */
-  byte[] readByteArray(String filePath, long offset, int length);
-
-  /**
-   * This method will be used to read the byte array from file based on length(number of bytes)
-   *
-   * @param filePath fully qualified file path
-   * @param length   number of bytes to be read
-   * @return read byte array
-   */
-  byte[] readByteArray(String filePath, int length);
-
-  /**
-   * This method will be used to read an int from the file at position(offset); here
-   * the length will always be 4 because the int byte size is 4
-   *
-   * @param filePath fully qualified file path
-   * @param offset   reading start position,
-   * @return read int
-   */
-  int readInt(String filePath, long offset);
-
-  /**
-   * This method will be used to read a long from the file at position(offset); here
-   * the length will always be 8 because the long byte size is 8
-   *
-   * @param filePath fully qualified file path
-   * @param offset   reading start position,
-   * @return read long
-   */
-  long readLong(String filePath, long offset);
-
-  /**
-   * This method will be used to read an int from the current position in the file;
-   * the length will always be 4 because the int byte size is 4
-   *
-   * @param filePath fully qualified file path
-   * @return read int
-   */
-  int readInt(String filePath);
-
-  /**
-   * This method will be used to read a long value from the file at position(offset);
-   * the length will always be 8 because the long byte size is 8
-   *
-   * @param filePath fully qualified file path
-   * @param offset   reading start position,
-   * @return read long
-   */
-  long readDouble(String filePath, long offset);
-
-  /**
-   * This method will be used to close all the streams currently present in the cache
-   */
-  void finish();
-}
\ No newline at end of file

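A minimal usage sketch for the interface above. The offsets and the footer layout here are invented purely for illustration, and `holder` stands in for any concrete FileHolder implementation (how one is obtained is out of scope for this sketch):

    // Hypothetical reader for a footer laid out as <int count><long offset>...<metadata bytes>.
    static byte[] readFooterMeta(FileHolder holder, String filePath, long footerOffset) {
      int blockletCount = holder.readInt(filePath, footerOffset);     // 4 bytes at the offset
      long metaOffset = holder.readLong(filePath, footerOffset + 4);  // the next 8 bytes
      byte[] meta = holder.readByteArray(filePath, metaOffset, 64);   // 64 metadata bytes
      holder.finish();                                                // close the cached streams
      return meta;
    }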
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/MeasureDataWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/MeasureDataWrapper.java b/core/src/main/java/org/carbondata/core/datastorage/store/MeasureDataWrapper.java
deleted file mode 100644
index 889cf93..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/MeasureDataWrapper.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store;
-
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-
-/**
- * MeasureDataWrapper, interface.
- */
-public interface MeasureDataWrapper {
-  CarbonReadDataHolder[] getValues();
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/NodeKeyStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/NodeKeyStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/NodeKeyStore.java
deleted file mode 100644
index b8553c9..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/NodeKeyStore.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store;
-
-public interface NodeKeyStore {
-  /**
-   * This method will be used to get the actual mdkey array present in the
-   * store
-   *
-   * @param fileHolder
-   * @return mdkey
-   */
-  byte[] getBackArray(FileHolder fileHolder);
-
-  /**
-   * This method will be used to insert mdkey to store
-   *
-   * @param index index of mdkey
-   * @param value mdkey
-   */
-  void put(int index, byte[] value);
-
-  /**
-   * This method will be used to get the writable key array.
-   * writable key array will hold below information:
-   * <size of key array><key array>
-   * total length will be 4 bytes for size + key array length
-   *
-   * @return writable array (compressed or normal)
-   */
-  byte[] getWritableKeyArray();
-
-  /**
-   * This method will be used to get the mdkey array based on index
-   *
-   * @param index      index in store
-   * @param fileHolder file holder will be used to read the file
-   * @return mdkey
-   */
-  byte[] get(int index, FileHolder fileHolder);
-
-  /**
-   * This method will clear the store and create the new empty store
-   */
-  void clear();
-
-}

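The `<size of key array><key array>` layout documented on getWritableKeyArray() is a plain length-prefixed record: 4 bytes for the size followed by the key bytes. A standalone sketch of writing and reading that framing (the real store may additionally compress the array, which is not shown here):

    import java.nio.ByteBuffer;

    public final class LengthPrefixedKeyExample {
      /** Frame a key array as <4-byte size><key bytes>. */
      static byte[] toWritable(byte[] keyArray) {
        ByteBuffer buffer = ByteBuffer.allocate(4 + keyArray.length);
        buffer.putInt(keyArray.length);
        buffer.put(keyArray);
        return buffer.array();
      }

      /** Undo the framing: read the size, then exactly that many key bytes. */
      static byte[] fromWritable(byte[] writable) {
        ByteBuffer buffer = ByteBuffer.wrap(writable);
        byte[] keyArray = new byte[buffer.getInt()];
        buffer.get(keyArray);
        return keyArray;
      }

      public static void main(String[] args) {
        byte[] roundTripped = fromWritable(toWritable(new byte[] { 1, 2, 3 }));
        System.out.println(roundTripped.length); // prints 3
      }
    }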
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/NodeMeasureDataStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/NodeMeasureDataStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/NodeMeasureDataStore.java
deleted file mode 100644
index 13145db..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/NodeMeasureDataStore.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store;
-
-import org.carbondata.core.datastorage.store.dataholder.CarbonWriteDataHolder;
-
-public interface NodeMeasureDataStore {
-  /**
-   * This method will be used to get the writable measure data array.
-   * writable measure data array will hold below information:
-   * <size of measure data array><measure data array>
-   * total length will be 4 bytes for size + measure data array length
-   *
-   * @return writable array (compressed or normal)
-   */
-  byte[][] getWritableMeasureDataArray(CarbonWriteDataHolder[] dataHolderArray);
-
-  MeasureDataWrapper getBackData(int[] cols, FileHolder fileHolder);
-
-  MeasureDataWrapper getBackData(int cols, FileHolder fileHolder);
-
-  short getLength();
-
-}
\ No newline at end of file


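getWritableMeasureDataArray() in the interface above applies the same 4-byte length prefix, but per measure column, returning one framed byte[] per column. A hedged sketch of that per-column framing; plain byte arrays stand in for the CarbonWriteDataHolder inputs, and compression is again omitted:

    import java.nio.ByteBuffer;

    public final class MeasureFramingExample {
      /** Frame each measure column independently as <4-byte size><column bytes>. */
      static byte[][] frameColumns(byte[][] columns) {
        byte[][] framed = new byte[columns.length][];
        for (int i = 0; i < columns.length; i++) {
          ByteBuffer buffer = ByteBuffer.allocate(4 + columns[i].length);
          buffer.putInt(columns[i].length);
          buffer.put(columns[i]);
          framed[i] = buffer.array();
        }
        return framed;
      }

      public static void main(String[] args) {
        byte[][] framed = frameColumns(new byte[][] { { 7, 7 }, { 9 } });
        System.out.println(framed[0].length + " " + framed[1].length); // prints 6 5
      }
    }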
[05/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/FilterUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/FilterUtil.java b/core/src/main/java/org/carbondata/scan/filter/FilterUtil.java
deleted file mode 100644
index e172d25..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/FilterUtil.java
+++ /dev/null
@@ -1,1395 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.filter;
-
-import java.math.BigDecimal;
-import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
-import java.text.SimpleDateFormat;
-import java.util.*;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.cache.Cache;
-import org.carbondata.core.cache.CacheProvider;
-import org.carbondata.core.cache.CacheType;
-import org.carbondata.core.cache.dictionary.Dictionary;
-import org.carbondata.core.cache.dictionary.DictionaryChunksWrapper;
-import org.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
-import org.carbondata.core.cache.dictionary.ForwardDictionary;
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.IndexKey;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.keygenerator.KeyGenException;
-import org.carbondata.core.keygenerator.KeyGenerator;
-import org.carbondata.core.util.ByteUtil;
-import org.carbondata.core.util.CarbonProperties;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.core.util.CarbonUtilException;
-import org.carbondata.core.util.DataTypeUtil;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.expression.ColumnExpression;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.LiteralExpression;
-import org.carbondata.scan.expression.UnknownExpression;
-import org.carbondata.scan.expression.conditional.ListExpression;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.executer.*;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.FilterExecuterType;
-import org.carbondata.scan.filter.intf.RowImpl;
-import org.carbondata.scan.filter.intf.RowIntf;
-import org.carbondata.scan.filter.resolver.FilterResolverIntf;
-import org.carbondata.scan.filter.resolver.RowLevelFilterResolverImpl;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-
-public final class FilterUtil {
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(FilterUtil.class.getName());
-
-  private FilterUtil() {
-
-  }
-
-  /**
-   * Pattern used : Visitor Pattern
-   * Method will create the filter executer tree based on the resolved filter tree;
-   * in this algorithm based on the resolver instance the executers will be visited
-   * and the resolved surrogates will be converted to keys
-   *
-   * @param filterExpressionResolverTree
-   * @param segmentProperties
-   * @return FilterExecuter instance
-   */
-  private static FilterExecuter createFilterExecuterTree(
-      FilterResolverIntf filterExpressionResolverTree, SegmentProperties segmentProperties,
-      Map<Integer, GenericQueryType> complexDimensionInfoMap) {
-    FilterExecuterType filterExecuterType = filterExpressionResolverTree.getFilterExecuterType();
-    if (null != filterExecuterType) {
-      switch (filterExecuterType) {
-        case INCLUDE:
-          return getIncludeFilterExecuter(
-              filterExpressionResolverTree.getDimColResolvedFilterInfo(), segmentProperties);
-        case EXCLUDE:
-          return getExcludeFilterExecuter(
-              filterExpressionResolverTree.getDimColResolvedFilterInfo(), segmentProperties);
-        case OR:
-          return new OrFilterExecuterImpl(
-              createFilterExecuterTree(filterExpressionResolverTree.getLeft(), segmentProperties,
-                  complexDimensionInfoMap),
-              createFilterExecuterTree(filterExpressionResolverTree.getRight(), segmentProperties,
-                  complexDimensionInfoMap));
-        case AND:
-          return new AndFilterExecuterImpl(
-              createFilterExecuterTree(filterExpressionResolverTree.getLeft(), segmentProperties,
-                  complexDimensionInfoMap),
-              createFilterExecuterTree(filterExpressionResolverTree.getRight(), segmentProperties,
-                  complexDimensionInfoMap));
-        case RESTRUCTURE:
-          return new RestructureFilterExecuterImpl(
-              filterExpressionResolverTree.getDimColResolvedFilterInfo(),
-              segmentProperties.getDimensionKeyGenerator());
-        case ROWLEVEL_LESSTHAN:
-        case ROWLEVEL_LESSTHAN_EQUALTO:
-        case ROWLEVEL_GREATERTHAN_EQUALTO:
-        case ROWLEVEL_GREATERTHAN:
-          return RowLevelRangeTypeExecuterFacory
-              .getRowLevelRangeTypeExecuter(filterExecuterType, filterExpressionResolverTree,
-                  segmentProperties);
-        case ROWLEVEL:
-        default:
-          return new RowLevelFilterExecuterImpl(
-              ((RowLevelFilterResolverImpl) filterExpressionResolverTree)
-                  .getDimColEvaluatorInfoList(),
-              ((RowLevelFilterResolverImpl) filterExpressionResolverTree)
-                  .getMsrColEvalutorInfoList(),
-              ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getFilterExpresion(),
-              ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
-              segmentProperties, complexDimensionInfoMap);
-
-      }
-    }
-    return new RowLevelFilterExecuterImpl(
-        ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getDimColEvaluatorInfoList(),
-        ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getMsrColEvalutorInfoList(),
-        ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getFilterExpresion(),
-        ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
-        segmentProperties, complexDimensionInfoMap);
-
-  }
-
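The recursion above is the heart of the visitor approach: every logical resolver node turns into an executer that simply wraps the executers built for its children, so evaluating the root walks the entire tree. A toy model of that composition follows, with the real FilterExecuter API replaced by an invented one-method interface over BitSets:

    import java.util.BitSet;

    public final class ExecuterTreeToy {
      interface Exec { BitSet apply(); }

      /** AND node: intersect the child results, mirroring the shape of AndFilterExecuterImpl. */
      static Exec and(Exec left, Exec right) {
        return () -> {
          BitSet result = left.apply();
          result.and(right.apply());
          return result;
        };
      }

      /** OR node: union the child results, mirroring the shape of OrFilterExecuterImpl. */
      static Exec or(Exec left, Exec right) {
        return () -> {
          BitSet result = left.apply();
          result.or(right.apply());
          return result;
        };
      }

      public static void main(String[] args) {
        Exec a = () -> BitSet.valueOf(new long[] { 0b1100 }); // rows 2 and 3 match
        Exec b = () -> BitSet.valueOf(new long[] { 0b1010 }); // rows 1 and 3 match
        System.out.println(or(a, b).apply());  // {1, 2, 3}
        System.out.println(and(a, b).apply()); // {3}
      }
    }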
-  /**
-   * It gives filter executer based on columnar or column group
-   *
-   * @param dimColResolvedFilterInfo
-   * @param segmentProperties
-   * @return
-   */
-  private static FilterExecuter getIncludeFilterExecuter(
-      DimColumnResolvedFilterInfo dimColResolvedFilterInfo, SegmentProperties segmentProperties) {
-
-    if (dimColResolvedFilterInfo.getDimension().isColumnar()) {
-      return new IncludeFilterExecuterImpl(dimColResolvedFilterInfo, segmentProperties);
-    } else {
-      return new IncludeColGroupFilterExecuterImpl(dimColResolvedFilterInfo, segmentProperties);
-    }
-  }
-
-  /**
-   * It gives filter executer based on columnar or column group
-   *
-   * @param dimColResolvedFilterInfo
-   * @param segmentProperties
-   * @return
-   */
-  private static FilterExecuter getExcludeFilterExecuter(
-      DimColumnResolvedFilterInfo dimColResolvedFilterInfo, SegmentProperties segmentProperties) {
-
-    if (dimColResolvedFilterInfo.getDimension().isColumnar()) {
-      return new ExcludeFilterExecuterImpl(dimColResolvedFilterInfo, segmentProperties);
-    } else {
-      return new ExcludeColGroupFilterExecuterImpl(dimColResolvedFilterInfo, segmentProperties);
-    }
-  }
-
-  /**
-   * This method will check if a given expression contains a column expression
-   * recursively.
-   *
-   * @return
-   */
-  public static boolean checkIfExpressionContainsColumn(Expression expression) {
-    if (expression instanceof ColumnExpression) {
-      return true;
-    }
-    for (Expression child : expression.getChildren()) {
-      if (checkIfExpressionContainsColumn(child)) {
-        return true;
-      }
-    }
-
-    return false;
-  }
-
-  /**
-   * This method will check recursively whether a given expression on the left
-   * side requires evaluation.
-   *
-   * @return
-   */
-  public static boolean checkIfLeftExpressionRequireEvaluation(Expression expression) {
-    if (expression.getFilterExpressionType() == ExpressionType.UNKNOWN
-        || !(expression instanceof ColumnExpression)) {
-      return true;
-    }
-    for (Expression child : expression.getChildren()) {
-      if (checkIfLeftExpressionRequireEvaluation(child)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * This method will check recursively whether a given literal expression is not
-   * of the timestamp datatype.
-   *
-   * @return
-   */
-  public static boolean checkIfDataTypeNotTimeStamp(Expression expression) {
-    if (expression.getFilterExpressionType() == ExpressionType.LITERAL) {
-      if (!(((LiteralExpression) expression).getLiteralExpDataType()
-          == DataType.TIMESTAMP)) {
-        return true;
-      }
-    }
-    for (Expression child : expression.getChildren()) {
-      if (checkIfDataTypeNotTimeStamp(child)) {
-        return true;
-      }
-    }
-    return false;
-  }
-  /**
-   * This method will check recursively whether a given expression on the right
-   * side requires evaluation.
-   *
-   * @return
-   */
-  public static boolean checkIfRightExpressionRequireEvaluation(Expression expression) {
-    if (expression.getFilterExpressionType() == ExpressionType.UNKNOWN
-        || !(expression instanceof LiteralExpression) && !(expression instanceof ListExpression)) {
-      return true;
-    }
-    for (Expression child : expression.getChildren()) {
-      if (checkIfRightExpressionRequireEvaluation(child)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * method will get the masked keys based on the keys generated from surrogates.
-   *
-   * @param ranges
-   * @param key
-   * @return byte[]
-   */
-  private static byte[] getMaskedKey(int[] ranges, byte[] key) {
-    byte[] maskkey = new byte[ranges.length];
-
-    for (int i = 0; i < maskkey.length; i++) {
-      maskkey[i] = key[ranges[i]];
-    }
-    return maskkey;
-  }
-
-  /**
-   * This method will return the ranges for the masked Bytes based on the key
-   * Generator.
-   *
-   * @param queryDimensionsOrdinal
-   * @param generator
-   * @return
-   */
-  private static int[] getRangesForMaskedByte(int queryDimensionsOrdinal, KeyGenerator generator) {
-    Set<Integer> integers = new TreeSet<Integer>();
-    int[] range = generator.getKeyByteOffsets(queryDimensionsOrdinal);
-    for (int j = range[0]; j <= range[1]; j++) {
-      integers.add(j);
-    }
-
-    int[] byteIndexs = new int[integers.size()];
-    int j = 0;
-    for (Iterator<Integer> iterator = integers.iterator(); iterator.hasNext(); ) {
-      Integer integer = iterator.next();
-      byteIndexs[j++] = integer.intValue();
-    }
-    return byteIndexs;
-  }
-
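Taken together, getRangesForMaskedByte and getMaskedKey above just project the bytes belonging to one dimension out of the full MDKey: the first computes which byte offsets the dimension occupies, the second copies those bytes. A standalone sketch with an invented 4-byte key, assuming the target dimension occupies byte offsets 1..2:

    public final class MaskedKeyExample {
      public static void main(String[] args) {
        // Full MDKey as the key generator would produce it (values invented).
        byte[] fullKey = { 0x10, 0x20, 0x30, 0x40 };
        // Byte offsets of the target dimension inside the key, i.e. what
        // getRangesForMaskedByte would derive from generator.getKeyByteOffsets(ordinal).
        int[] ranges = { 1, 2 };

        byte[] masked = new byte[ranges.length];
        for (int i = 0; i < masked.length; i++) {
          masked[i] = fullKey[ranges[i]];
        }
        // masked == { 0x20, 0x30 }: only the bytes of that one dimension remain,
        // which is what the filter values are compared against.
        System.out.println(masked[0] + " " + masked[1]); // prints 32 48
      }
    }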
-  /**
-   * This method will get the no dictionary filter data based on the filter values and
-   * set the same in DimColumnFilterInfo
-   *
-   * @param tableIdentifier
-   * @param columnExpression
-   * @param evaluateResultListFinal
-   * @param isIncludeFilter
-   * @return DimColumnFilterInfo
-   */
-  public static DimColumnFilterInfo getNoDictionaryValKeyMemberForFilter(
-      AbsoluteTableIdentifier tableIdentifier, ColumnExpression columnExpression,
-      List<String> evaluateResultListFinal, boolean isIncludeFilter) {
-    List<byte[]> filterValuesList = new ArrayList<byte[]>(20);
-    for (String result : evaluateResultListFinal) {
-      filterValuesList.add(result.getBytes(Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET)));
-    }
-
-    Comparator<byte[]> filterNoDictValueComaparator = new Comparator<byte[]>() {
-
-      @Override public int compare(byte[] filterMember1, byte[] filterMember2) {
-        return ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterMember1, filterMember2);
-      }
-
-    };
-    Collections.sort(filterValuesList, filterNoDictValueComaparator);
-    DimColumnFilterInfo columnFilterInfo = null;
-    if (filterValuesList.size() > 0) {
-      columnFilterInfo = new DimColumnFilterInfo();
-      columnFilterInfo.setIncludeFilter(isIncludeFilter);
-      columnFilterInfo.setFilterListForNoDictionaryCols(filterValuesList);
-
-    }
-    return columnFilterInfo;
-  }
-
-  /**
-   * Method will prepare the DimColumnFilterInfo instance by resolving the filter
-   * expression value to its respective surrogates.
-   *
-   * @param tableIdentifier
-   * @param columnExpression
-   * @param evaluateResultList
-   * @param isIncludeFilter
-   * @return
-   * @throws QueryExecutionException
-   */
-  public static DimColumnFilterInfo getFilterValues(AbsoluteTableIdentifier tableIdentifier,
-      ColumnExpression columnExpression, List<String> evaluateResultList, boolean isIncludeFilter)
-      throws QueryExecutionException {
-    Dictionary forwardDictionary = null;
-    try {
-      // Reading the dictionary value from cache.
-      forwardDictionary =
-          getForwardDictionaryCache(tableIdentifier, columnExpression.getDimension());
-      return getFilterValues(columnExpression, evaluateResultList, forwardDictionary,
-          isIncludeFilter);
-    } finally {
-      CarbonUtil.clearDictionaryCache(forwardDictionary);
-    }
-  }
-
-  /**
-   * Method will prepare the DimColumnFilterInfo instance by resolving the filter
-   * expression value to its respective surrogates.
-   *
-   * @param columnExpression
-   * @param evaluateResultList
-   * @param forwardDictionary
-   * @param isIncludeFilter
-   * @return
-   * @throws QueryExecutionException
-   */
-  private static DimColumnFilterInfo getFilterValues(ColumnExpression columnExpression,
-      List<String> evaluateResultList, Dictionary forwardDictionary, boolean isIncludeFilter)
-      throws QueryExecutionException {
-    sortFilterModelMembers(columnExpression, evaluateResultList);
-    List<Integer> surrogates =
-        new ArrayList<Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    // Reading the dictionary value from cache.
-    getDictionaryValue(evaluateResultList, forwardDictionary, surrogates);
-    Collections.sort(surrogates);
-    DimColumnFilterInfo columnFilterInfo = null;
-    if (surrogates.size() > 0) {
-      columnFilterInfo = new DimColumnFilterInfo();
-      columnFilterInfo.setIncludeFilter(isIncludeFilter);
-      columnFilterInfo.setFilterList(surrogates);
-    }
-    return columnFilterInfo;
-  }
-
-  /**
-   * This API will get the Dictionary value for the respective filter member
-   * string.
-   *
-   * @param evaluateResultList filter value
-   * @param surrogates
-   * @throws QueryExecutionException
-   */
-  private static void getDictionaryValue(List<String> evaluateResultList,
-      Dictionary forwardDictionary, List<Integer> surrogates) throws QueryExecutionException {
-    ((ForwardDictionary) forwardDictionary)
-        .getSurrogateKeyByIncrementalSearch(evaluateResultList, surrogates);
-  }
-
-  /**
-   * This method will get all the members of column from the forward dictionary
-   * cache; this method will basically be used in the row level filter resolver.
-   *
-   * @param tableIdentifier
-   * @param expression
-   * @param columnExpression
-   * @param isIncludeFilter
-   * @return DimColumnFilterInfo
-   * @throws FilterUnsupportedException
-   * @throws QueryExecutionException
-   */
-  public static DimColumnFilterInfo getFilterListForAllValues(
-      AbsoluteTableIdentifier tableIdentifier, Expression expression,
-      final ColumnExpression columnExpression, boolean isIncludeFilter)
-      throws FilterUnsupportedException {
-    Dictionary forwardDictionary = null;
-    List<String> evaluateResultListFinal = new ArrayList<String>(20);
-    DictionaryChunksWrapper dictionaryWrapper = null;
-    try {
-      forwardDictionary =
-          getForwardDictionaryCache(tableIdentifier, columnExpression.getDimension());
-      dictionaryWrapper = forwardDictionary.getDictionaryChunks();
-      while (dictionaryWrapper.hasNext()) {
-        byte[] columnVal = dictionaryWrapper.next();
-        try {
-          RowIntf row = new RowImpl();
-          String stringValue =
-              new String(columnVal, Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
-          if (stringValue.equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
-            stringValue = null;
-          }
-          row.setValues(new Object[] { DataTypeUtil.getDataBasedOnDataType(stringValue,
-              columnExpression.getCarbonColumn().getDataType()) });
-          Boolean rslt = expression.evaluate(row).getBoolean();
-          if (null != rslt && !(rslt ^ isIncludeFilter)) {
-            if (null == stringValue) {
-              evaluateResultListFinal.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL);
-            } else {
-              evaluateResultListFinal.add(stringValue);
-            }
-          }
-        } catch (FilterIllegalMemberException e) {
-          LOGGER.debug(e.getMessage());
-        }
-      }
-      return getFilterValues(columnExpression, evaluateResultListFinal, forwardDictionary,
-          isIncludeFilter);
-    } catch (QueryExecutionException e) {
-      throw new FilterUnsupportedException(e.getMessage());
-    } finally {
-      CarbonUtil.clearDictionaryCache(forwardDictionary);
-    }
-  }
-
-  private static void sortFilterModelMembers(final ColumnExpression columnExpression,
-      List<String> evaluateResultListFinal) {
-    Comparator<String> filterActualValueComaparator = new Comparator<String>() {
-
-      @Override public int compare(String filterMember1, String filterMember2) {
-        return compareFilterMembersBasedOnActualDataType(filterMember1, filterMember2,
-            columnExpression.getDataType());
-      }
-
-    };
-    Collections.sort(evaluateResultListFinal, filterActualValueComaparator);
-  }
-
-  /**
-   * Method will resolve the filter member to its respective surrogates by
-   * scanning the dictionary cache.
-   *
-   * @param tableIdentifier
-   * @param expression
-   * @param columnExpression
-   * @param isIncludeFilter
-   * @return
-   * @throws QueryExecutionException
-   * @throws FilterUnsupportedException
-   */
-  public static DimColumnFilterInfo getFilterList(AbsoluteTableIdentifier tableIdentifier,
-      Expression expression, ColumnExpression columnExpression, boolean isIncludeFilter)
-      throws QueryExecutionException, FilterUnsupportedException {
-    DimColumnFilterInfo resolvedFilterObject = null;
-    List<String> evaluateResultListFinal = new ArrayList<String>(20);
-    try {
-      List<ExpressionResult> evaluateResultList = expression.evaluate(null).getList();
-      for (ExpressionResult result : evaluateResultList) {
-        if (result.getString() == null) {
-          evaluateResultListFinal.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL);
-          continue;
-        }
-        evaluateResultListFinal.add(result.getString());
-      }
-
-      if (null != columnExpression.getCarbonColumn() && !columnExpression.getCarbonColumn()
-          .hasEncoding(Encoding.DICTIONARY)) {
-        resolvedFilterObject =
-            getNoDictionaryValKeyMemberForFilter(tableIdentifier, columnExpression,
-                evaluateResultListFinal, isIncludeFilter);
-      } else {
-        resolvedFilterObject =
-            getFilterValues(tableIdentifier, columnExpression, evaluateResultListFinal,
-                isIncludeFilter);
-      }
-    } catch (FilterIllegalMemberException e) {
-      LOGGER.audit(e.getMessage());
-    }
-    return resolvedFilterObject;
-  }
-
-  /**
-   * Method will prepare the DimColumnFilterInfo instance by resolving the filter
-   * expression value to its respective surrogates in the scenario of restructure.
-   *
-   * @param expression
-   * @param columnExpression
-   * @param defaultValues
-   * @param defaultSurrogate
-   * @return
-   * @throws FilterUnsupportedException
-   */
-  public static DimColumnFilterInfo getFilterListForRS(Expression expression,
-      ColumnExpression columnExpression, String defaultValues, int defaultSurrogate)
-      throws FilterUnsupportedException {
-    List<Integer> filterValuesList = new ArrayList<Integer>(20);
-    DimColumnFilterInfo columnFilterInfo = null;
-    // List<byte[]> filterValuesList = new ArrayList<byte[]>(20);
-    List<String> evaluateResultListFinal = new ArrayList<String>(20);
-    // KeyGenerator keyGenerator =
-    // KeyGeneratorFactory.getKeyGenerator(new int[] { defaultSurrogate });
-    try {
-      List<ExpressionResult> evaluateResultList = expression.evaluate(null).getList();
-      for (ExpressionResult result : evaluateResultList) {
-        if (result.getString() == null) {
-          evaluateResultListFinal.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL);
-          continue;
-        }
-        evaluateResultListFinal.add(result.getString());
-      }
-
-      for (int i = 0; i < evaluateResultListFinal.size(); i++) {
-        if (evaluateResultListFinal.get(i).equals(defaultValues)) {
-          filterValuesList.add(defaultSurrogate);
-          break;
-        }
-      }
-      if (filterValuesList.size() > 0) {
-        columnFilterInfo = new DimColumnFilterInfo();
-        columnFilterInfo.setFilterList(filterValuesList);
-      }
-    } catch (FilterIllegalMemberException e) {
-      LOGGER.audit(e.getMessage());
-    }
-    return columnFilterInfo;
-  }
-
-  /**
-   * This method will get the member based on filter expression evaluation from the
-   * forward dictionary cache; this method will basically be used in restructure.
-   *
-   * @param expression
-   * @param columnExpression
-   * @param defaultValues
-   * @param defaultSurrogate
-   * @param isIncludeFilter
-   * @return
-   * @throws FilterUnsupportedException
-   */
-  public static DimColumnFilterInfo getFilterListForAllMembersRS(Expression expression,
-      ColumnExpression columnExpression, String defaultValues, int defaultSurrogate,
-      boolean isIncludeFilter) throws FilterUnsupportedException {
-    List<Integer> filterValuesList = new ArrayList<Integer>(20);
-    List<String> evaluateResultListFinal = new ArrayList<String>(20);
-    DimColumnFilterInfo columnFilterInfo = null;
-
-    // KeyGenerator keyGenerator =
-    // KeyGeneratorFactory.getKeyGenerator(new int[] { defaultSurrogate });
-    try {
-      RowIntf row = new RowImpl();
-      if (defaultValues.equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
-        defaultValues = null;
-      }
-      row.setValues(new Object[] { DataTypeUtil.getDataBasedOnDataType(defaultValues,
-          columnExpression.getCarbonColumn().getDataType()) });
-      Boolean rslt = expression.evaluate(row).getBoolean();
-      if (null != rslt && !(rslt ^ isIncludeFilter)) {
-        if (null == defaultValues) {
-          evaluateResultListFinal.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL);
-        } else {
-          evaluateResultListFinal.add(defaultValues);
-        }
-      }
-    } catch (FilterIllegalMemberException e) {
-      LOGGER.audit(e.getMessage());
-    }
-
-    if (null == defaultValues) {
-      defaultValues = CarbonCommonConstants.MEMBER_DEFAULT_VAL;
-    }
-    columnFilterInfo = new DimColumnFilterInfo();
-    for (int i = 0; i < evaluateResultListFinal.size(); i++) {
-      if (evaluateResultListFinal.get(i).equals(defaultValues)) {
-        filterValuesList.add(defaultSurrogate);
-        break;
-      }
-    }
-    columnFilterInfo.setFilterList(filterValuesList);
-    return columnFilterInfo;
-  }
-
-  public static byte[][] getKeyArray(DimColumnFilterInfo dimColumnFilterInfo,
-      CarbonDimension carbonDimension, KeyGenerator blockLevelKeyGenerator) {
-    if (!carbonDimension.hasEncoding(Encoding.DICTIONARY)) {
-      return dimColumnFilterInfo.getNoDictionaryFilterValuesList()
-          .toArray((new byte[dimColumnFilterInfo.getNoDictionaryFilterValuesList().size()][]));
-    }
-    int[] keys = new int[blockLevelKeyGenerator.getDimCount()];
-    List<byte[]> filterValuesList = new ArrayList<byte[]>(20);
-    Arrays.fill(keys, 0);
-    int[] rangesForMaskedByte =
-        getRangesForMaskedByte((carbonDimension.getKeyOrdinal()), blockLevelKeyGenerator);
-    if (null != dimColumnFilterInfo) {
-      for (Integer surrogate : dimColumnFilterInfo.getFilterList()) {
-        try {
-          keys[carbonDimension.getKeyOrdinal()] = surrogate;
-          filterValuesList
-              .add(getMaskedKey(rangesForMaskedByte, blockLevelKeyGenerator.generateKey(keys)));
-        } catch (KeyGenException e) {
-          LOGGER.error(e.getMessage());
-        }
-      }
-
-    }
-    return filterValuesList.toArray(new byte[filterValuesList.size()][]);
-
-  }
-
-  /**
-   * The method is used to get the single dictionary key's mask key
-   *
-   * @param surrogate
-   * @param carbonDimension
-   * @param blockLevelKeyGenerator
-   * @return
-   */
-  public static byte[] getMaskKey(int surrogate, CarbonDimension carbonDimension,
-      KeyGenerator blockLevelKeyGenerator) {
-
-    int[] keys = new int[blockLevelKeyGenerator.getDimCount()];
-    byte[] maskedKey = null;
-    Arrays.fill(keys, 0);
-    int[] rangesForMaskedByte =
-        getRangesForMaskedByte((carbonDimension.getKeyOrdinal()), blockLevelKeyGenerator);
-    try {
-      keys[carbonDimension.getKeyOrdinal()] = surrogate;
-      maskedKey = getMaskedKey(rangesForMaskedByte, blockLevelKeyGenerator.generateKey(keys));
-    } catch (KeyGenException e) {
-      LOGGER.error(e.getMessage());
-    }
-    return maskedKey;
-  }
-
-  /**
-   * Method will return the start key based on KeyGenerator for the respective
-   * filter resolved instance.
-   *
-   * @param dimensionFilter
-   * @param startKey
-   * @param startKeyList
-   */
-  public static void getStartKey(Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionFilter,
-      long[] startKey, List<long[]> startKeyList) throws QueryExecutionException {
-    for(int i = 0; i < startKey.length; i++) {
-      // The minimum surrogate key is 1; use it as the initial value for the start key of each column level
-      startKey[i] = 1;
-    }
-    getStartKeyWithFilter(dimensionFilter, startKey, startKeyList);
-  }
-
-  /**
-   * Algorithm for getting the start key for a filter.
-   * step 1: Iterate through each dimension and verify that it is not an exclude filter.
-   * step 2: Initialize the start key with the first filter member value present in each filter
-   * model for the respective dimension.
-   * step 3: Since this is a no dictionary start key, only actual values are present, so compare
-   * the first filter model value with respect to the dimension data type.
-   * step 4: The least value is taken as the start key of the dimension by comparing all
-   * its filter models.
-   * step 5: Create a byte array of the start key which comprises the least filter member value
-   * of all dimensions and the indexes which help to read the respective filter values.
-   *
-   * @param dimColResolvedFilterInfo
-   * @param setOfStartKeyByteArray
-   */
-  public static void getStartKeyForNoDictionaryDimension(
-      DimColumnResolvedFilterInfo dimColResolvedFilterInfo,
-      SortedMap<Integer, byte[]> setOfStartKeyByteArray) {
-    Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionFilter =
-        dimColResolvedFilterInfo.getDimensionResolvedFilterInstance();
-    // step 1
-    for (Map.Entry<CarbonDimension, List<DimColumnFilterInfo>> entry : dimensionFilter.entrySet()) {
-      if (!entry.getKey().hasEncoding(Encoding.DICTIONARY)) {
-        List<DimColumnFilterInfo> listOfDimColFilterInfo = entry.getValue();
-        if (null == listOfDimColFilterInfo) {
-          continue;
-        }
-        boolean isExcludePresent = false;
-        for (DimColumnFilterInfo info : listOfDimColFilterInfo) {
-          if (!info.isIncludeFilter()) {
-            isExcludePresent = true;
-          }
-        }
-        if (isExcludePresent) {
-          continue;
-        }
-        // step 2
-        byte[] noDictionaryStartKey =
-            listOfDimColFilterInfo.get(0).getNoDictionaryFilterValuesList().get(0);
-        if (setOfStartKeyByteArray.isEmpty()) {
-          setOfStartKeyByteArray.put(entry.getKey().getOrdinal(), noDictionaryStartKey);
-        } else if (null == setOfStartKeyByteArray.get(entry.getKey().getOrdinal())) {
-          setOfStartKeyByteArray.put(entry.getKey().getOrdinal(), noDictionaryStartKey);
-
-        } else if (ByteUtil.UnsafeComparer.INSTANCE
-            .compareTo(setOfStartKeyByteArray.get(entry.getKey().getOrdinal()),
-                noDictionaryStartKey) > 0) {
-          setOfStartKeyByteArray.put(entry.getKey().getOrdinal(), noDictionaryStartKey);
-        }
-      }
-    }
-  }
-
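
Steps 2-4 of the algorithm above reduce, per dimension ordinal, to keeping the lexicographically smallest candidate byte[] seen so far. A compact JDK-only equivalent of that update rule (the unsigned compare stands in for ByteUtil.UnsafeComparer):

    import java.util.SortedMap;
    import java.util.TreeMap;

    public class StartKeyMergeDemo {
      // Unsigned lexicographic compare, analogous to ByteUtil.UnsafeComparer.
      static int compareUnsigned(byte[] a, byte[] b) {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
          int d = (a[i] & 0xFF) - (b[i] & 0xFF);
          if (d != 0) return d;
        }
        return a.length - b.length;
      }

      // Keep the smallest candidate per ordinal; absent ordinals are simply filled in.
      static void updateStartKey(SortedMap<Integer, byte[]> startKeys, int ordinal,
          byte[] candidate) {
        byte[] current = startKeys.get(ordinal);
        if (current == null || compareUnsigned(current, candidate) > 0) {
          startKeys.put(ordinal, candidate);
        }
      }

      public static void main(String[] args) {
        SortedMap<Integer, byte[]> startKeys = new TreeMap<>();
        updateStartKey(startKeys, 0, new byte[] { 5, 1 });
        updateStartKey(startKeys, 0, new byte[] { 4, 9 });  // smaller, replaces {5, 1}
        System.out.println(startKeys.get(0)[0]);            // prints 4
      }
    }
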
-  /**
-   * Algorithm for getting the end key for a filter.
-   * step 1: Iterate through each dimension and verify that it is not an exclude filter.
-   * step 2: Initialize the end key with the last filter member value present in each filter
-   * model for the respective dimension (the filter models are already sorted).
-   * step 3: Since this is a no dictionary end key, only actual values are present, so compare
-   * the last filter model value with respect to the dimension data type.
-   * step 4: The highest value is taken as the end key of the dimension by comparing all
-   * its filter models.
-   * step 5: Create a byte array of the end key which comprises the highest filter member value
-   * of all dimensions and the indexes which help to read the respective filter values.
-   *
-   * @param dimColResolvedFilterInfo
-   * @param setOfEndKeyByteArray
-   */
-  public static void getEndKeyForNoDictionaryDimension(
-      DimColumnResolvedFilterInfo dimColResolvedFilterInfo,
-      SortedMap<Integer, byte[]> setOfEndKeyByteArray) {
-
-    Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionFilter =
-        dimColResolvedFilterInfo.getDimensionResolvedFilterInstance();
-    // step 1
-    for (Map.Entry<CarbonDimension, List<DimColumnFilterInfo>> entry : dimensionFilter.entrySet()) {
-      if (!entry.getKey().hasEncoding(Encoding.DICTIONARY)) {
-        List<DimColumnFilterInfo> listOfDimColFilterInfo = entry.getValue();
-        if (null == listOfDimColFilterInfo) {
-          continue;
-        }
-        boolean isExcludePresent = false;
-        for (DimColumnFilterInfo info : listOfDimColFilterInfo) {
-          if (!info.isIncludeFilter()) {
-            isExcludePresent = true;
-          }
-        }
-        if (isExcludePresent) {
-          continue;
-        }
-        // step 2
-        byte[] noDictionaryEndKey = listOfDimColFilterInfo.get(0).getNoDictionaryFilterValuesList()
-            .get(listOfDimColFilterInfo.get(0).getNoDictionaryFilterValuesList().size() - 1);
-        if (setOfEndKeyByteArray.isEmpty()) {
-          setOfEndKeyByteArray.put(entry.getKey().getOrdinal(), noDictionaryEndKey);
-        } else if (null == setOfEndKeyByteArray.get(entry.getKey().getOrdinal())) {
-          setOfEndKeyByteArray.put(entry.getKey().getOrdinal(), noDictionaryEndKey);
-
-        } else if (ByteUtil.UnsafeComparer.INSTANCE
-            .compareTo(setOfEndKeyByteArray.get(entry.getKey().getOrdinal()), noDictionaryEndKey)
-            < 0) {
-          setOfEndKeyByteArray.put(entry.getKey().getOrdinal(), noDictionaryEndKey);
-        }
-
-      }
-    }
-  }
-
-  /**
-   * Method will pack all the byte[] values into a single byte[] by prepending the
-   * indexes of the byte[] values which need to be read. This method is mainly used
-   * in no dictionary dimension processing for filters.
-   *
-   * @param noDictionaryValKeyList
-   * @return packed key with its indexes added at the start, followed by the actual values.
-   */
-  private static byte[] getKeyWithIndexesAndValues(List<byte[]> noDictionaryValKeyList) {
-    ByteBuffer[] buffArr = new ByteBuffer[noDictionaryValKeyList.size()];
-    int index = 0;
-    for (byte[] singleColVal : noDictionaryValKeyList) {
-      buffArr[index] = ByteBuffer.allocate(singleColVal.length);
-      buffArr[index].put(singleColVal);
-      buffArr[index++].rewind();
-    }
-    return CarbonUtil.packByteBufferIntoSingleByteArray(buffArr);
-
-  }
-
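
The packed layout produced by getKeyWithIndexesAndValues is a 2-byte offset per value followed by the concatenated value bytes, matching the <[0,4,0,5,...]> examples further below. A self-contained sketch of that packing (hypothetical helper, not CarbonUtil.packByteBufferIntoSingleByteArray itself):

    import java.nio.ByteBuffer;
    import java.util.Arrays;
    import java.util.List;

    public class PackWithOffsetsDemo {
      static byte[] pack(List<byte[]> values) {
        int headerLen = values.size() * Short.BYTES;   // one 2-byte offset per value
        int dataLen = values.stream().mapToInt(v -> v.length).sum();
        ByteBuffer out = ByteBuffer.allocate(headerLen + dataLen);
        short offset = (short) headerLen;              // first value starts after the header
        for (byte[] v : values) {
          out.putShort(offset);
          offset += v.length;
        }
        for (byte[] v : values) {
          out.put(v);
        }
        return out.array();
      }

      public static void main(String[] args) {
        byte[] packed = pack(Arrays.asList(new byte[] { 10 }, new byte[] { 20, 21 }));
        System.out.println(Arrays.toString(packed));   // [0, 4, 0, 5, 10, 20, 21]
      }
    }
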
-  /**
-   * This method will fill the start key array with the surrogate keys present
-   * in the filter info instance.
-   *
-   * @param dimensionFilter
-   * @param startKey
-   * @param startKeyList
-   */
-  private static void getStartKeyWithFilter(
-      Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionFilter, long[] startKey,
-      List<long[]> startKeyList) {
-    for (Map.Entry<CarbonDimension, List<DimColumnFilterInfo>> entry : dimensionFilter.entrySet()) {
-      List<DimColumnFilterInfo> values = entry.getValue();
-      if (null == values || !entry.getKey().hasEncoding(Encoding.DICTIONARY)) {
-        continue;
-      }
-      boolean isExcludePresent = false;
-      for (DimColumnFilterInfo info : values) {
-        if (!info.isIncludeFilter()) {
-          isExcludePresent = true;
-        }
-      }
-      if (isExcludePresent) {
-        continue;
-      }
-      for (DimColumnFilterInfo info : values) {
-        if (startKey[entry.getKey().getKeyOrdinal()] < info.getFilterList().get(0)) {
-          startKey[entry.getKey().getKeyOrdinal()] = info.getFilterList().get(0);
-        }
-      }
-      long[] newStartKey = new long[startKey.length];
-      System.arraycopy(startKey, 0, newStartKey, 0, startKey.length);
-      startKeyList.add(newStartKey);
-    }
-  }
-
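
For dictionary dimensions, getStartKeyWithFilter raises the start key of each filtered dimension to the largest of the first (smallest) surrogates across its include-filter lists. A minimal model of that rule, with made-up surrogate lists:

    import java.util.Arrays;
    import java.util.List;

    public class DictionaryStartKeyDemo {
      public static void main(String[] args) {
        long[] startKey = { 1, 1, 1 };   // the minimum surrogate key is 1
        int keyOrdinal = 1;              // the filtered dimension
        List<List<Integer>> filterLists =
            Arrays.asList(Arrays.asList(4, 9, 12), Arrays.asList(6, 7));
        for (List<Integer> filterList : filterLists) {
          int first = filterList.get(0); // lists are sorted; first = smallest surrogate
          if (startKey[keyOrdinal] < first) {
            startKey[keyOrdinal] = first;
          }
        }
        System.out.println(Arrays.toString(startKey));  // [1, 6, 1]
      }
    }
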
-  public static void getEndKey(Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionFilter,
-      AbsoluteTableIdentifier tableIdentifier, long[] endKey, SegmentProperties segmentProperties,
-      List<long[]> endKeyList) throws QueryExecutionException {
-
-    List<CarbonDimension> updatedDimListBasedOnKeyGenerator =
-        getCarbonDimsMappedToKeyGenerator(segmentProperties.getDimensions());
-    for (int i = 0; i < endKey.length; i++) {
-      endKey[i] = getMaxValue(tableIdentifier, updatedDimListBasedOnKeyGenerator.get(i),
-          segmentProperties.getDimColumnsCardinality());
-    }
-    getEndKeyWithFilter(dimensionFilter, endKey, endKeyList);
-
-  }
-
-  private static List<CarbonDimension> getCarbonDimsMappedToKeyGenerator(
-      List<CarbonDimension> carbonDimensions) {
-    List<CarbonDimension> listOfCarbonDimPartOfKeyGen =
-        new ArrayList<CarbonDimension>(carbonDimensions.size());
-    for (CarbonDimension carbonDim : carbonDimensions) {
-      if (CarbonUtil.hasEncoding(carbonDim.getEncoder(), Encoding.DICTIONARY) || CarbonUtil
-          .hasEncoding(carbonDim.getEncoder(), Encoding.DIRECT_DICTIONARY)) {
-        listOfCarbonDimPartOfKeyGen.add(carbonDim);
-      }
-
-    }
-    return listOfCarbonDimPartOfKeyGen;
-  }
-
-  private static void getEndKeyWithFilter(
-      Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionFilter, long[] endKey,
-      List<long[]> endKeyList) {
-    for (Map.Entry<CarbonDimension, List<DimColumnFilterInfo>> entry : dimensionFilter.entrySet()) {
-      List<DimColumnFilterInfo> values = entry.getValue();
-      if (null == values || !entry.getKey().hasEncoding(Encoding.DICTIONARY)) {
-        continue;
-      }
-      boolean isExcludeFilterPresent = false;
-      for (DimColumnFilterInfo info : values) {
-        if (!info.isIncludeFilter()) {
-          isExcludeFilterPresent = true;
-        }
-      }
-      if (isExcludeFilterPresent) {
-        continue;
-      }
-
-      for (DimColumnFilterInfo info : values) {
-        if (endKey[entry.getKey().getKeyOrdinal()] > info.getFilterList()
-            .get(info.getFilterList().size() - 1)) {
-          endKey[entry.getKey().getKeyOrdinal()] =
-              info.getFilterList().get(info.getFilterList().size() - 1);
-        }
-      }
-      long[] newEndKey = new long[endKey.length];
-      System.arraycopy(endKey, 0, newEndKey, 0, endKey.length);
-      endKeyList.add(newEndKey);
-    }
-
-  }
-
-  /**
-   * This API will get the max value of the surrogate key which will be used for
-   * determining the end key of a particular btree.
-   *
-   * @param dimCardinality
-   * @throws QueryExecutionException
-   */
-  private static long getMaxValue(AbsoluteTableIdentifier tableIdentifier,
-      CarbonDimension carbonDimension, int[] dimCardinality) throws QueryExecutionException {
-    // Get data from all the available slices of the table
-    if (null != dimCardinality) {
-      return dimCardinality[carbonDimension.getKeyOrdinal()];
-    }
-    return -1;
-  }
-
-  /**
-   * @param tableIdentifier
-   * @param carbonDimension
-   * @return
-   * @throws QueryExecutionException
-   */
-  public static Dictionary getForwardDictionaryCache(AbsoluteTableIdentifier tableIdentifier,
-      CarbonDimension carbonDimension) throws QueryExecutionException {
-    DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier =
-        new DictionaryColumnUniqueIdentifier(tableIdentifier.getCarbonTableIdentifier(),
-            carbonDimension.getColumnIdentifier(), carbonDimension.getDataType());
-    CacheProvider cacheProvider = CacheProvider.getInstance();
-    Cache forwardDictionaryCache =
-        cacheProvider.createCache(CacheType.FORWARD_DICTIONARY, tableIdentifier.getStorePath());
-    // get the forward dictionary object
-    Dictionary forwardDictionary = null;
-    try {
-      forwardDictionary = (Dictionary) forwardDictionaryCache.get(dictionaryColumnUniqueIdentifier);
-    } catch (CarbonUtilException e) {
-      throw new QueryExecutionException(e);
-    }
-    return forwardDictionary;
-  }
-
-  public static IndexKey createIndexKeyFromResolvedFilterVal(long[] startOrEndKey,
-      KeyGenerator keyGenerator, byte[] startOrEndKeyForNoDictDimension) {
-    IndexKey indexKey = null;
-    try {
-      indexKey =
-          new IndexKey(keyGenerator.generateKey(startOrEndKey), startOrEndKeyForNoDictDimension);
-    } catch (KeyGenException e) {
-      LOGGER.error(e.getMessage());
-    }
-    return indexKey;
-  }
-
-  /**
-   * API will create a filter executer tree based on the filter resolver.
-   *
-   * @param filterExpressionResolverTree
-   * @param segmentProperties
-   * @param complexDimensionInfoMap
-   * @return
-   */
-  public static FilterExecuter getFilterExecuterTree(
-      FilterResolverIntf filterExpressionResolverTree, SegmentProperties segmentProperties,
-      Map<Integer, GenericQueryType> complexDimensionInfoMap) {
-    return createFilterExecuterTree(filterExpressionResolverTree, segmentProperties,
-        complexDimensionInfoMap);
-  }
-
-  /**
-   * API will prepare the keys from the surrogates of a particular filter resolver.
-   *
-   * @param filterValues
-   * @param blockKeyGenerator
-   * @param dimension
-   * @param dimColumnExecuterInfo
-   */
-  public static void prepareKeysFromSurrogates(DimColumnFilterInfo filterValues,
-      KeyGenerator blockKeyGenerator, CarbonDimension dimension,
-      DimColumnExecuterFilterInfo dimColumnExecuterInfo) {
-    byte[][] keysBasedOnFilter = getKeyArray(filterValues, dimension, blockKeyGenerator);
-    dimColumnExecuterInfo.setFilterKeys(keysBasedOnFilter);
-
-  }
-
-  /**
-   * Method will create a default end key in case no end key has been derived using the
-   * existing filter, or in case of non-filter queries.
-   *
-   * @param segmentProperties
-   * @return
-   * @throws KeyGenException
-   */
-  public static IndexKey prepareDefaultEndIndexKey(SegmentProperties segmentProperties)
-      throws KeyGenException {
-    long[] dictionarySurrogateKey =
-        new long[segmentProperties.getDimensions().size() - segmentProperties
-            .getNumberOfNoDictionaryDimension()];
-    Arrays.fill(dictionarySurrogateKey, Long.MAX_VALUE);
-    IndexKey endIndexKey;
-    byte[] dictionaryendMdkey =
-        segmentProperties.getDimensionKeyGenerator().generateKey(dictionarySurrogateKey);
-    byte[] noDictionaryEndKeyBuffer = getNoDictionaryDefaultEndKey(segmentProperties);
-    endIndexKey = new IndexKey(dictionaryendMdkey, noDictionaryEndKeyBuffer);
-    return endIndexKey;
-  }
-
-  public static byte[] getNoDictionaryDefaultEndKey(SegmentProperties segmentProperties) {
-    // in case of a non filter query, when no dictionary columns are present we
-    // need to set the default end key; for a non filter query we need to get the last
-    // block of the btree, so we set the max byte value in the end key
-    ByteBuffer noDictionaryEndKeyBuffer = ByteBuffer.allocate(
-        (segmentProperties.getNumberOfNoDictionaryDimension()
-            * CarbonCommonConstants.SHORT_SIZE_IN_BYTE) + segmentProperties
-            .getNumberOfNoDictionaryDimension());
-    // end key structure will be
-    //<Offset of first No Dictionary key in 2 Bytes><Offset of second No Dictionary key in 2 Bytes>
-    //<Offset of n No Dictionary key in 2 Bytes><first no dictionary column value>
-    // <second no dictionary column value> <N no dictionary column value>
-    //example if we have 2 no dictionary column
-    //<[0,4,0,5,127,127]>
-    short startPoint = (short) (segmentProperties.getNumberOfNoDictionaryDimension()
-        * CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
-    for (int i = 0; i < segmentProperties.getNumberOfNoDictionaryDimension(); i++) {
-      noDictionaryEndKeyBuffer.putShort((startPoint));
-      startPoint++;
-    }
-    for (int i = 0; i < segmentProperties.getNumberOfNoDictionaryDimension(); i++) {
-      noDictionaryEndKeyBuffer.put((byte) 127);
-    }
-    return noDictionaryEndKeyBuffer.array();
-  }
-
-  /**
-   * Method will create a default start key in case no start key has been
-   * derived using the existing filter, or in case of non-filter queries.
-   *
-   * @param segmentProperties
-   * @return
-   * @throws KeyGenException
-   */
-  public static IndexKey prepareDefaultStartIndexKey(SegmentProperties segmentProperties)
-      throws KeyGenException {
-    IndexKey startIndexKey;
-    long[] dictionarySurrogateKey =
-        new long[segmentProperties.getDimensions().size() - segmentProperties
-            .getNumberOfNoDictionaryDimension()];
-    byte[] dictionaryStartMdkey =
-        segmentProperties.getDimensionKeyGenerator().generateKey(dictionarySurrogateKey);
-    byte[] noDictionaryStartKeyArray = getNoDictionaryDefaultStartKey(segmentProperties);
-
-    startIndexKey = new IndexKey(dictionaryStartMdkey, noDictionaryStartKeyArray);
-    return startIndexKey;
-  }
-
-  public static byte[] getNoDictionaryDefaultStartKey(SegmentProperties segmentProperties) {
-    // in case of a non filter query, when no dictionary columns are present we
-    // need to set the default start key; for a non filter query we need to get the first
-    // block of the btree, so we set the least byte value in the start key
-    ByteBuffer noDictionaryStartKeyBuffer = ByteBuffer.allocate(
-        (segmentProperties.getNumberOfNoDictionaryDimension()
-            * CarbonCommonConstants.SHORT_SIZE_IN_BYTE) + segmentProperties
-            .getNumberOfNoDictionaryDimension());
-    // start key structure will be
-    //<Offset of first No Dictionary key in 2 Bytes><Offset of second No Dictionary key in 2 Bytes>
-    //<Offset of n No Dictionary key in 2 Bytes><first no dictionary column value>
-    // <second no dictionary column value> <N no dictionary column value>
-    //example if we have 2 no dictionary column
-    //<[0,4,0,5,0,0]>
-    short startPoint = (short) (segmentProperties.getNumberOfNoDictionaryDimension()
-        * CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
-    for (int i = 0; i < segmentProperties.getNumberOfNoDictionaryDimension(); i++) {
-      noDictionaryStartKeyBuffer.putShort((startPoint));
-      startPoint++;
-    }
-    for (int i = 0; i < segmentProperties.getNumberOfNoDictionaryDimension(); i++) {
-      noDictionaryStartKeyBuffer.put((byte) 0);
-    }
-    return noDictionaryStartKeyBuffer.array();
-  }
-
-  public static int compareFilterKeyBasedOnDataType(String dictionaryVal, String memberVal,
-      DataType dataType) {
-    try {
-      switch (dataType) {
-        case SHORT:
-          return Short.compare((Short.parseShort(dictionaryVal)), (Short.parseShort(memberVal)));
-        case INT:
-          return Integer.compare((Integer.parseInt(dictionaryVal)), (Integer.parseInt(memberVal)));
-        case DOUBLE:
-          return Double
-              .compare((Double.parseDouble(dictionaryVal)), (Double.parseDouble(memberVal)));
-        case LONG:
-          return Long.compare((Long.parseLong(dictionaryVal)), (Long.parseLong(memberVal)));
-        case BOOLEAN:
-          return Boolean
-              .compare((Boolean.parseBoolean(dictionaryVal)), (Boolean.parseBoolean(memberVal)));
-        case TIMESTAMP:
-          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
-              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
-          Date memberDate = parser.parse(memberVal);
-          Date dictionaryDate = parser.parse(dictionaryVal);
-          return dictionaryDate.compareTo(memberDate);
-
-        case DECIMAL:
-          java.math.BigDecimal javaDecValForDictVal = new java.math.BigDecimal(dictionaryVal);
-          java.math.BigDecimal javaDecValForMemberVal = new java.math.BigDecimal(memberVal);
-          return javaDecValForDictVal.compareTo(javaDecValForMemberVal);
-        default:
-          return -1;
-      }
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
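
The per-type dispatch in compareFilterKeyBasedOnDataType matters because lexicographic and numeric orderings disagree; a two-line demonstration of why values must be parsed before comparing:

    public class TypedCompareDemo {
      public static void main(String[] args) {
        System.out.println("9".compareTo("10") > 0);   // true: string ordering
        System.out.println(Integer.compare(
            Integer.parseInt("9"), Integer.parseInt("10")) > 0);  // false: numeric ordering
      }
    }
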
-  /**
-   * Method will set the start and end key as per the filter resolver tree;
-   * it utilizes the visitor pattern in order to populate the start and end keys.
-   *
-   * @param segmentProperties
-   * @param tableIdentifier
-   * @param filterResolver
-   * @param listOfStartEndKeys
-   * @throws QueryExecutionException
-   */
-  public static void traverseResolverTreeAndGetStartAndEndKey(SegmentProperties segmentProperties,
-      AbsoluteTableIdentifier tableIdentifier, FilterResolverIntf filterResolver,
-      List<IndexKey> listOfStartEndKeys) throws QueryExecutionException {
-    IndexKey searchStartKey = null;
-    IndexKey searchEndKey = null;
-    long[] startKey = new long[segmentProperties.getDimensionKeyGenerator().getDimCount()];
-    long[] endKey = new long[segmentProperties.getDimensionKeyGenerator().getDimCount()];
-    List<byte[]> listOfStartKeyByteArray =
-        new ArrayList<byte[]>(segmentProperties.getNumberOfNoDictionaryDimension());
-    List<byte[]> listOfEndKeyByteArray =
-        new ArrayList<byte[]>(segmentProperties.getNumberOfNoDictionaryDimension());
-    SortedMap<Integer, byte[]> setOfStartKeyByteArray = new TreeMap<Integer, byte[]>();
-    SortedMap<Integer, byte[]> setOfEndKeyByteArray = new TreeMap<Integer, byte[]>();
-    SortedMap<Integer, byte[]> defaultStartValues = new TreeMap<Integer, byte[]>();
-    SortedMap<Integer, byte[]> defaultEndValues = new TreeMap<Integer, byte[]>();
-    List<long[]> startKeyList = new ArrayList<long[]>();
-    List<long[]> endKeyList = new ArrayList<long[]>();
-    traverseResolverTreeAndPopulateStartAndEndKeys(filterResolver, tableIdentifier,
-        segmentProperties, startKey, setOfStartKeyByteArray, endKey, setOfEndKeyByteArray,
-        startKeyList, endKeyList);
-    if (endKeyList.size() > 0) {
-      //get the new end key from list
-      for (int i = 0; i < endKey.length; i++) {
-        long[] endkeyColumnLevel = new long[endKeyList.size()];
-        int j = 0;
-        for (long[] oneEndKey : endKeyList) {
-          //get each column level end key
-          endkeyColumnLevel[j++] = oneEndKey[i];
-        }
-        Arrays.sort(endkeyColumnLevel);
-        // get the max one as end of this column level
-        endKey[i] = endkeyColumnLevel[endkeyColumnLevel.length - 1];
-      }
-    }
-
-    if (startKeyList.size() > 0) {
-      //get the new start key from list
-      for (int i = 0; i < startKey.length; i++) {
-        long[] startkeyColumnLevel = new long[startKeyList.size()];
-        int j = 0;
-        for (long[] oneStartKey : startKeyList) {
-          //get each column level start key
-          startkeyColumnLevel[j++] = oneStartKey[i];
-        }
-        Arrays.sort(startkeyColumnLevel);
-        // get the min - 1 as the start of this column level. For example, if a block contains 5,6
-        // and the filter is 6, that block's start key is 5; without the -1 this block would be missed.
-        startKey[i] = startkeyColumnLevel[0] - 1;
-      }
-    }
-
-    fillDefaultStartValue(defaultStartValues, segmentProperties);
-    fillDefaultEndValue(defaultEndValues, segmentProperties);
-    fillNullValuesStartIndexWithDefaultKeys(setOfStartKeyByteArray, segmentProperties);
-    fillNullValuesEndIndexWithDefaultKeys(setOfEndKeyByteArray, segmentProperties);
-    pruneStartAndEndKeys(setOfStartKeyByteArray, listOfStartKeyByteArray);
-    pruneStartAndEndKeys(setOfEndKeyByteArray, listOfEndKeyByteArray);
-
-    searchStartKey = FilterUtil
-        .createIndexKeyFromResolvedFilterVal(startKey, segmentProperties.getDimensionKeyGenerator(),
-            FilterUtil.getKeyWithIndexesAndValues(listOfStartKeyByteArray));
-
-    searchEndKey = FilterUtil
-        .createIndexKeyFromResolvedFilterVal(endKey, segmentProperties.getDimensionKeyGenerator(),
-            FilterUtil.getKeyWithIndexesAndValues(listOfEndKeyByteArray));
-    listOfStartEndKeys.add(searchStartKey);
-    listOfStartEndKeys.add(searchEndKey);
-
-  }
-
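
The two merge loops above collapse the per-dimension key candidates column by column: the end key takes the per-column maximum, and the start key takes the per-column minimum minus one so that a block whose start key sits just below the filter value is not skipped. A condensed JDK-only version of the start-key side:

    import java.util.Arrays;
    import java.util.List;

    public class StartKeyColumnMergeDemo {
      public static void main(String[] args) {
        List<long[]> startKeyList = Arrays.asList(new long[] { 6, 3 }, new long[] { 4, 8 });
        long[] startKey = new long[2];
        for (int col = 0; col < startKey.length; col++) {
          long min = Long.MAX_VALUE;
          for (long[] key : startKeyList) {
            min = Math.min(min, key[col]);
          }
          startKey[col] = min - 1;  // -1 keeps the block containing the boundary value
        }
        System.out.println(Arrays.toString(startKey));  // [3, 2]
      }
    }
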
-  private static int compareFilterMembersBasedOnActualDataType(String filterMember1,
-      String filterMember2, DataType dataType) {
-    try {
-      switch (dataType) {
-        case SHORT:
-        case INT:
-        case LONG:
-        case DOUBLE:
-
-          if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(filterMember1)) {
-            return 1;
-          }
-          Double d1 = Double.parseDouble(filterMember1);
-          Double d2 = Double.parseDouble(filterMember2);
-          return d1.compareTo(d2);
-        case DECIMAL:
-          if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(filterMember1)) {
-            return 1;
-          }
-          java.math.BigDecimal val1 = new BigDecimal(filterMember1);
-          java.math.BigDecimal val2 = new BigDecimal(filterMember2);
-          return val1.compareTo(val2);
-        case TIMESTAMP:
-          if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(filterMember1)) {
-            return 1;
-          }
-          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
-              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
-          Date date1 = parser.parse(filterMember1);
-          Date date2 = parser.parse(filterMember2);
-          return date1.compareTo(date2);
-        case STRING:
-        default:
-          return filterMember1.compareTo(filterMember2);
-      }
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  private static void fillNullValuesStartIndexWithDefaultKeys(
-      SortedMap<Integer, byte[]> setOfStartKeyByteArray, SegmentProperties segmentProperties) {
-    List<CarbonDimension> allDimension = segmentProperties.getDimensions();
-    for (CarbonDimension dimension : allDimension) {
-      if (CarbonUtil.hasEncoding(dimension.getEncoder(), Encoding.DICTIONARY)) {
-        continue;
-      }
-      if (null == setOfStartKeyByteArray.get(dimension.getOrdinal())) {
-        setOfStartKeyByteArray.put(dimension.getOrdinal(), new byte[] { 0 });
-      }
-
-    }
-  }
-
-  private static void fillNullValuesEndIndexWithDefaultKeys(
-      SortedMap<Integer, byte[]> setOfEndKeyByteArray, SegmentProperties segmentProperties) {
-    List<CarbonDimension> allDimension = segmentProperties.getDimensions();
-    for (CarbonDimension dimension : allDimension) {
-      if (CarbonUtil.hasEncoding(dimension.getEncoder(), Encoding.DICTIONARY)) {
-        continue;
-      }
-      if (null == setOfEndKeyByteArray.get(dimension.getOrdinal())) {
-        setOfEndKeyByteArray.put(dimension.getOrdinal(), new byte[] { 127 });
-      }
-    }
-  }
-
-  private static void pruneStartAndEndKeys(SortedMap<Integer, byte[]> setOfStartKeyByteArray,
-      List<byte[]> listOfStartKeyByteArray) {
-    for (Map.Entry<Integer, byte[]> entry : setOfStartKeyByteArray.entrySet()) {
-      listOfStartKeyByteArray.add(entry.getValue());
-    }
-  }
-
-  private static void fillDefaultStartValue(SortedMap<Integer, byte[]> setOfStartKeyByteArray,
-      SegmentProperties segmentProperties) {
-    List<CarbonDimension> allDimension = segmentProperties.getDimensions();
-    for (CarbonDimension dimension : allDimension) {
-      if (CarbonUtil.hasEncoding(dimension.getEncoder(), Encoding.DICTIONARY)) {
-        continue;
-      }
-      setOfStartKeyByteArray.put(dimension.getOrdinal(), new byte[] { 0 });
-    }
-
-  }
-
-  private static void fillDefaultEndValue(SortedMap<Integer, byte[]> setOfEndKeyByteArray,
-      SegmentProperties segmentProperties) {
-    List<CarbonDimension> allDimension = segmentProperties.getDimensions();
-    for (CarbonDimension dimension : allDimension) {
-      if (CarbonUtil.hasEncoding(dimension.getEncoder(), Encoding.DICTIONARY)) {
-        continue;
-      }
-      setOfEndKeyByteArray.put(dimension.getOrdinal(), new byte[] { 127 });
-    }
-  }
-
-  private static void traverseResolverTreeAndPopulateStartAndEndKeys(
-      FilterResolverIntf filterResolverTree, AbsoluteTableIdentifier tableIdentifier,
-      SegmentProperties segmentProperties, long[] startKeys,
-      SortedMap<Integer, byte[]> setOfStartKeyByteArray, long[] endKeys,
-      SortedMap<Integer, byte[]> setOfEndKeyByteArray, List<long[]> startKeyList,
-      List<long[]> endKeyList) throws QueryExecutionException {
-    if (null == filterResolverTree) {
-      return;
-    }
-    traverseResolverTreeAndPopulateStartAndEndKeys(filterResolverTree.getLeft(), tableIdentifier,
-        segmentProperties, startKeys, setOfStartKeyByteArray, endKeys, setOfEndKeyByteArray,
-        startKeyList, endKeyList);
-    filterResolverTree.getStartKey(startKeys, setOfStartKeyByteArray, startKeyList);
-    filterResolverTree.getEndKey(segmentProperties, tableIdentifier, endKeys, setOfEndKeyByteArray,
-        endKeyList);
-
-    traverseResolverTreeAndPopulateStartAndEndKeys(filterResolverTree.getRight(), tableIdentifier,
-        segmentProperties, startKeys, setOfStartKeyByteArray, endKeys, setOfEndKeyByteArray,
-        startKeyList, endKeyList);
-  }
-
-  /**
-   * Method will find whether the expression needs to be resolved; this can happen
-   * if the expression is an exclude and the data type is null (mainly in the
-   * IS NOT NULL filter scenario).
-   *
-   * @param rightExp
-   * @param isIncludeFilter
-   * @return
-   */
-  public static boolean isExpressionNeedsToResolved(Expression rightExp, boolean isIncludeFilter) {
-    if (!isIncludeFilter && rightExp instanceof LiteralExpression && (
-        DataType.NULL == ((LiteralExpression) rightExp)
-            .getLiteralExpDataType())) {
-      return true;
-    }
-    for (Expression child : rightExp.getChildren()) {
-      if (isExpressionNeedsToResolved(child, isIncludeFilter)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * This method will log the error for invalid filter members.
-   *
-   * @param e
-   */
-  public static void logError(Throwable e, boolean invalidRowsPresent) {
-    if (!invalidRowsPresent) {
-      invalidRowsPresent = true;
-      LOGGER.error(e, CarbonCommonConstants.FILTER_INVALID_MEMBER + e.getMessage());
-    }
-  }
-
-  /**
-   * This method will return list of all the unknown expressions
-   *
-   * @param expression
-   */
-  public static List<UnknownExpression> getUnknownExpressionsList(Expression expression) {
-    List<UnknownExpression> listOfExp =
-        new ArrayList<UnknownExpression>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    getUnknownExpressionsList(expression, listOfExp);
-    return listOfExp;
-  }
-
-  /**
-   * This method will prepare the list with all unknown expressions
-   *
-   * @param expression
-   * @param lst
-   */
-  private static void getUnknownExpressionsList(Expression expression,
-      List<UnknownExpression> lst) {
-    if (expression instanceof UnknownExpression) {
-      UnknownExpression colExp = (UnknownExpression) expression;
-      lst.add(colExp);
-      return;
-    }
-    for (Expression child : expression.getChildren()) {
-      getUnknownExpressionsList(child, lst);
-    }
-  }
-
-}
\ No newline at end of file
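
Both isExpressionNeedsToResolved and getUnknownExpressionsList in the class above follow the same shape: recursive descent over an expression tree, testing or collecting nodes of one kind. A self-contained model of that traversal (hypothetical Node type, not the CarbonData Expression API):

    import java.util.ArrayList;
    import java.util.List;

    public class ExpressionWalkDemo {
      static class Node {
        final String kind;
        final List<Node> children;
        Node(String kind, Node... children) {
          this.kind = kind;
          this.children = List.of(children);
        }
      }

      // Depth-first collection of all nodes of the requested kind.
      static void collect(Node node, String kind, List<Node> out) {
        if (node.kind.equals(kind)) {
          out.add(node);
          return;  // like getUnknownExpressionsList: do not descend further
        }
        for (Node child : node.children) {
          collect(child, kind, out);
        }
      }

      public static void main(String[] args) {
        Node tree = new Node("and", new Node("unknown"),
            new Node("or", new Node("unknown")));
        List<Node> found = new ArrayList<>();
        collect(tree, "unknown", found);
        System.out.println(found.size());  // 2
      }
    }
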

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/GenericQueryType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/GenericQueryType.java b/core/src/main/java/org/carbondata/scan/filter/GenericQueryType.java
deleted file mode 100644
index a2b2da3..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/GenericQueryType.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.filter;
-
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-import org.apache.spark.sql.types.DataType;
-
-public interface GenericQueryType {
-
-  String getName();
-
-  void setName(String name);
-
-  String getParentname();
-
-  void setParentname(String parentname);
-
-  int getBlockIndex();
-
-  void setBlockIndex(int blockIndex);
-
-  void addChildren(GenericQueryType children);
-
-  void getAllPrimitiveChildren(List<GenericQueryType> primitiveChild);
-
-  int getSurrogateIndex();
-
-  void setSurrogateIndex(int surrIndex);
-
-  int getColsCount();
-
-  void setKeySize(int[] keyBlockSize);
-
-  int getKeyOrdinalForQuery();
-
-  void setKeyOrdinalForQuery(int keyOrdinalForQuery);
-
-  void parseBlocksAndReturnComplexColumnByteArray(DimensionColumnDataChunk[] dimensionDataChunks,
-      int rowNumber, DataOutputStream dataOutputStream) throws IOException;
-
-  DataType getSchemaType();
-
-  void parseAndGetResultBytes(ByteBuffer complexData, DataOutputStream dataOutput)
-      throws IOException;
-
-  void fillRequiredBlockData(BlocksChunkHolder blockChunkHolder);
-
-  Object getDataBasedOnDataTypeFromSurrogates(ByteBuffer surrogateData);
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/AndFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/AndFilterExecuterImpl.java b/core/src/main/java/org/carbondata/scan/filter/executer/AndFilterExecuterImpl.java
deleted file mode 100644
index 10ad66f..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/AndFilterExecuterImpl.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import java.util.BitSet;
-
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-public class AndFilterExecuterImpl implements FilterExecuter {
-
-  private FilterExecuter leftExecuter;
-  private FilterExecuter rightExecuter;
-
-  public AndFilterExecuterImpl(FilterExecuter leftExecuter, FilterExecuter rightExecuter) {
-    this.leftExecuter = leftExecuter;
-    this.rightExecuter = rightExecuter;
-  }
-
-  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
-      throws FilterUnsupportedException {
-    BitSet leftFilters = leftExecuter.applyFilter(blockChunkHolder);
-    if (leftFilters.isEmpty()) {
-      return leftFilters;
-    }
-    BitSet rightFilter = rightExecuter.applyFilter(blockChunkHolder);
-    if (rightFilter.isEmpty()) {
-      return rightFilter;
-    }
-    leftFilters.and(rightFilter);
-    return leftFilters;
-  }
-
-  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
-    BitSet leftFilters = leftExecuter.isScanRequired(blockMaxValue, blockMinValue);
-    if (leftFilters.isEmpty()) {
-      return leftFilters;
-    }
-    BitSet rightFilter = rightExecuter.isScanRequired(blockMaxValue, blockMinValue);
-    if (rightFilter.isEmpty()) {
-      return rightFilter;
-    }
-    leftFilters.and(rightFilter);
-    return leftFilters;
-  }
-}
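
The short-circuit in AndFilterExecuterImpl relies on plain BitSet semantics: an empty left-hand result means no row can survive the conjunction, so the right executer is never invoked. A quick JDK demonstration of the same combine step:

    import java.util.BitSet;

    public class BitSetAndDemo {
      public static void main(String[] args) {
        BitSet left = new BitSet(8);
        left.set(1); left.set(3); left.set(5);
        BitSet right = new BitSet(8);
        right.set(3); right.set(5); right.set(7);
        if (left.isEmpty()) {
          System.out.println("short-circuit: right side never evaluated");
        } else {
          left.and(right);           // rows surviving both filters
          System.out.println(left);  // {3, 5}
        }
      }
    }
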

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/DimColumnExecuterFilterInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/DimColumnExecuterFilterInfo.java b/core/src/main/java/org/carbondata/scan/filter/executer/DimColumnExecuterFilterInfo.java
deleted file mode 100644
index a81b4a1..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/DimColumnExecuterFilterInfo.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-public class DimColumnExecuterFilterInfo {
-
-  byte[][] filterKeys;
-
-  public void setFilterKeys(byte[][] filterKeys) {
-    this.filterKeys = filterKeys;
-  }
-
-  public byte[][] getFilterKeys() {
-    return filterKeys;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/ExcludeColGroupFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/ExcludeColGroupFilterExecuterImpl.java b/core/src/main/java/org/carbondata/scan/filter/executer/ExcludeColGroupFilterExecuterImpl.java
deleted file mode 100644
index 8c57c14..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/ExcludeColGroupFilterExecuterImpl.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.keygenerator.KeyGenException;
-import org.carbondata.core.keygenerator.KeyGenerator;
-import org.carbondata.core.util.ByteUtil;
-import org.carbondata.scan.executor.infos.KeyStructureInfo;
-import org.carbondata.scan.executor.util.QueryUtil;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-
-/**
- * It checks if a filter is required on the given block and, if required, does a
- * linear search on the block data and sets the bitset.
- */
-public class ExcludeColGroupFilterExecuterImpl extends ExcludeFilterExecuterImpl {
-
-  /**
-   * LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(ExcludeColGroupFilterExecuterImpl.class.getName());
-
-  /**
-   * @param dimColResolvedFilterInfo
-   * @param segmentProperties
-   */
-  public ExcludeColGroupFilterExecuterImpl(DimColumnResolvedFilterInfo dimColResolvedFilterInfo,
-      SegmentProperties segmentProperties) {
-    super(dimColResolvedFilterInfo, segmentProperties);
-  }
-
-  /**
-   * It fills the BitSet with the row indexes which match the filter keys.
-   */
-  protected BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    bitSet.flip(0, numerOfRows);
-    try {
-      KeyStructureInfo keyStructureInfo = getKeyStructureInfo();
-      byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-      for (int i = 0; i < filterValues.length; i++) {
-        byte[] filterVal = filterValues[i];
-        for (int rowId = 0; rowId < numerOfRows; rowId++) {
-          byte[] colData = new byte[keyStructureInfo.getMaskByteRanges().length];
-          dimensionColumnDataChunk.fillChunkData(colData, 0, rowId, keyStructureInfo);
-          if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterVal, colData) == 0) {
-            bitSet.flip(rowId);
-          }
-        }
-      }
-
-    } catch (Exception e) {
-      LOGGER.error(e);
-    }
-
-    return bitSet;
-  }
-
-  /**
-   * It is required for extracting column data from a column group chunk.
-   *
-   * @return
-   * @throws KeyGenException
-   */
-  private KeyStructureInfo getKeyStructureInfo() throws KeyGenException {
-    int colGrpId = getColumnGroupId(dimColEvaluatorInfo.getColumnIndex());
-    KeyGenerator keyGenerator = segmentProperties.getColumnGroupAndItsKeygenartor().get(colGrpId);
-    List<Integer> mdKeyOrdinal = new ArrayList<Integer>();
-    mdKeyOrdinal.add(getMdkeyOrdinal(dimColEvaluatorInfo.getColumnIndex(), colGrpId));
-    int[] maskByteRanges = QueryUtil.getMaskedByteRangeBasedOrdinal(mdKeyOrdinal, keyGenerator);
-    byte[] maxKey = QueryUtil.getMaxKeyBasedOnOrinal(mdKeyOrdinal, keyGenerator);
-    int[] maksedByte = QueryUtil.getMaskedByte(keyGenerator.getKeySizeInBytes(), maskByteRanges);
-    KeyStructureInfo restructureInfos = new KeyStructureInfo();
-    restructureInfos.setKeyGenerator(keyGenerator);
-    restructureInfos.setMaskByteRanges(maskByteRanges);
-    restructureInfos.setMaxKey(maxKey);
-    restructureInfos.setMaskedBytes(maksedByte);
-    return restructureInfos;
-  }
-
-  /**
-   * Check if scan is required on given block based on min and max value
-   */
-  public BitSet isScanRequired(byte[][] blkMaxVal, byte[][] blkMinVal) {
-    BitSet bitSet = new BitSet(1);
-    bitSet.flip(0, 1);
-    return bitSet;
-  }
-
-  private int getMdkeyOrdinal(int ordinal, int colGrpId) {
-    return segmentProperties.getColumnGroupMdKeyOrdinal(colGrpId, ordinal);
-  }
-
-  private int getColumnGroupId(int ordinal) {
-    int[][] columnGroups = segmentProperties.getColumnGroups();
-    int colGrpId = -1;
-    for (int i = 0; i < columnGroups.length; i++) {
-      if (columnGroups[i].length > 1) {
-        colGrpId++;
-        if (QueryUtil.searchInArray(columnGroups[i], ordinal)) {
-          break;
-        }
-      }
-    }
-    return colGrpId;
-  }
-
-  public KeyGenerator getKeyGenerator(int colGrpId) {
-    return segmentProperties.getColumnGroupAndItsKeygenartor().get(colGrpId);
-  }
-}
\ No newline at end of file
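
Exclude semantics in the executer above start from an all-true bitset and flip off every row whose value matches a filter key; whatever remains set survives the exclusion. A compact model of that linear scan:

    import java.util.Arrays;
    import java.util.BitSet;

    public class ExcludeScanDemo {
      public static void main(String[] args) {
        byte[][] rows = { { 1 }, { 2 }, { 3 }, { 2 } };
        byte[][] filterValues = { { 2 } };   // exclude every row equal to 2
        BitSet bitSet = new BitSet(rows.length);
        bitSet.flip(0, rows.length);         // start with all rows included
        for (byte[] filterVal : filterValues) {
          for (int rowId = 0; rowId < rows.length; rowId++) {
            if (Arrays.equals(filterVal, rows[rowId])) {
              bitSet.flip(rowId);            // matching rows drop out
            }
          }
        }
        System.out.println(bitSet);          // {0, 2}
      }
    }
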

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/ExcludeFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/ExcludeFilterExecuterImpl.java b/core/src/main/java/org/carbondata/scan/filter/executer/ExcludeFilterExecuterImpl.java
deleted file mode 100644
index 43ea2e5..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/ExcludeFilterExecuterImpl.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import java.util.BitSet;
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.impl.VariableLengthDimensionDataChunk;
-import org.carbondata.core.util.ByteUtil;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.scan.filter.FilterUtil;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-public class ExcludeFilterExecuterImpl implements FilterExecuter {
-
-  protected DimColumnResolvedFilterInfo dimColEvaluatorInfo;
-  protected DimColumnExecuterFilterInfo dimColumnExecuterInfo;
-  protected SegmentProperties segmentProperties;
-
-  public ExcludeFilterExecuterImpl(DimColumnResolvedFilterInfo dimColEvaluatorInfo,
-      SegmentProperties segmentProperties) {
-    this.dimColEvaluatorInfo = dimColEvaluatorInfo;
-    dimColumnExecuterInfo = new DimColumnExecuterFilterInfo();
-    this.segmentProperties = segmentProperties;
-    FilterUtil.prepareKeysFromSurrogates(dimColEvaluatorInfo.getFilterValues(),
-        segmentProperties.getDimensionKeyGenerator(), dimColEvaluatorInfo.getDimension(),
-        dimColumnExecuterInfo);
-  }
-
-  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder) {
-    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
-        .get(dimColEvaluatorInfo.getColumnIndex());
-    // lazily read the dimension chunk for this block index if it has not been loaded yet
-    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
-      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
-          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
-    }
-    return getFilteredIndexes(
-        blockChunkHolder.getDimensionDataChunk()[blockIndex],
-        blockChunkHolder.getDataBlock().nodeSize());
-  }
-
-  protected BitSet getFilteredIndexes(DimensionColumnDataChunk dimColumnDataChunk,
-      int numerOfRows) {
-    // For high cardinality dimensions.
-    if (dimColumnDataChunk.getAttributes().isNoDictionary()
-        && dimColumnDataChunk instanceof VariableLengthDimensionDataChunk) {
-      return setDirectKeyFilterIndexToBitSet((VariableLengthDimensionDataChunk) dimColumnDataChunk,
-          numerOfRows);
-    }
-    if (null != dimColumnDataChunk.getAttributes().getInvertedIndexes()
-        && dimColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-      return setFilterdIndexToBitSetWithColumnIndex(
-          (FixedLengthDimensionDataChunk) dimColumnDataChunk, numerOfRows);
-    }
-    return setFilterdIndexToBitSet((FixedLengthDimensionDataChunk) dimColumnDataChunk, numerOfRows);
-  }
-
-  private BitSet setDirectKeyFilterIndexToBitSet(
-      VariableLengthDimensionDataChunk dimColumnDataChunk, int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    bitSet.flip(0, numerOfRows);
-    List<byte[]> listOfColumnarKeyBlockDataForNoDictionaryVal =
-        dimColumnDataChunk.getCompleteDataChunk();
-    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-    int[] columnIndexArray = dimColumnDataChunk.getAttributes().getInvertedIndexes();
-    int[] columnReverseIndexArray = dimColumnDataChunk.getAttributes().getInvertedIndexesReverse();
-    for (int i = 0; i < filterValues.length; i++) {
-      byte[] filterVal = filterValues[i];
-      if (null != listOfColumnarKeyBlockDataForNoDictionaryVal) {
-
-        if (null != columnReverseIndexArray) {
-          for (int index : columnIndexArray) {
-            byte[] noDictionaryVal =
-                listOfColumnarKeyBlockDataForNoDictionaryVal.get(columnReverseIndexArray[index]);
-            if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterVal, noDictionaryVal) == 0) {
-              bitSet.flip(index);
-            }
-          }
-        } else if (null != columnIndexArray) {
-
-          for (int index : columnIndexArray) {
-            byte[] noDictionaryVal =
-                listOfColumnarKeyBlockDataForNoDictionaryVal.get(columnIndexArray[index]);
-            if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterVal, noDictionaryVal) == 0) {
-              bitSet.flip(index);
-            }
-          }
-        } else {
-          for (int index = 0;
-               index < listOfColumnarKeyBlockDataForNoDictionaryVal.size(); index++) {
-            if (ByteUtil.UnsafeComparer.INSTANCE
-                .compareTo(filterVal, listOfColumnarKeyBlockDataForNoDictionaryVal.get(index))
-                == 0) {
-              bitSet.flip(index);
-            }
-          }
-
-        }
-
-      }
-    }
-    return bitSet;
-
-  }
-
-  private BitSet setFilterdIndexToBitSetWithColumnIndex(
-      FixedLengthDimensionDataChunk dimColumnDataChunk, int numerOfRows) {
-    int[] columnIndex = dimColumnDataChunk.getAttributes().getInvertedIndexes();
-    int startKey = 0;
-    int last = 0;
-    int startIndex = 0;
-    BitSet bitSet = new BitSet(numerOfRows);
-    bitSet.flip(0, numerOfRows);
-    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-    for (int i = 0; i < filterValues.length; i++) {
-      startKey = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimColumnDataChunk, startIndex, numerOfRows - 1,
-              filterValues[i], false);
-      if (startKey < 0) {
-        continue;
-      }
-      bitSet.flip(columnIndex[startKey]);
-      last = startKey;
-      for (int j = startKey + 1; j < numerOfRows; j++) {
-        if (ByteUtil.UnsafeComparer.INSTANCE
-            .compareTo(dimColumnDataChunk.getCompleteDataChunk(), j * filterValues[i].length,
-                filterValues[i].length, filterValues[i], 0, filterValues[i].length) == 0) {
-          bitSet.flip(columnIndex[j]);
-          last++;
-        } else {
-          break;
-        }
-      }
-      startIndex = last;
-      if (startIndex >= numerOfRows) {
-        break;
-      }
-    }
-    return bitSet;
-  }
-
-  private BitSet setFilterdIndexToBitSet(FixedLengthDimensionDataChunk dimColumnDataChunk,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    bitSet.flip(0, numerOfRows);
-    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-    for (int k = 0; k < filterValues.length; k++) {
-      for (int j = 0; j < numerOfRows; j++) {
-        if (ByteUtil.UnsafeComparer.INSTANCE
-            .compareTo(dimColumnDataChunk.getCompleteDataChunk(), j * filterValues[k].length,
-                filterValues[k].length, filterValues[k], 0, filterValues[k].length) == 0) {
-          bitSet.flip(j);
-        }
-      }
-    }
-    return bitSet;
-  }
-
-  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
-    BitSet bitSet = new BitSet(1);
-    bitSet.flip(0, 1);
-    return bitSet;
-  }
-}
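
setFilterdIndexToBitSetWithColumnIndex above exploits the chunk being sorted by value: binary-search the first occurrence of a filter key, then walk forward while equal values continue, clearing the real row id through the inverted index. A simplified sketch over an int[] (the real code compares fixed-length byte slices):

    import java.util.BitSet;

    public class SortedRunScanDemo {
      // First index of target in sorted data, or -1 if absent.
      static int firstIndexOf(int[] data, int target) {
        int lo = 0, hi = data.length - 1, found = -1;
        while (lo <= hi) {
          int mid = (lo + hi) >>> 1;
          if (data[mid] < target) {
            lo = mid + 1;
          } else {
            if (data[mid] == target) found = mid;
            hi = mid - 1;
          }
        }
        return found;
      }

      public static void main(String[] args) {
        int[] sortedChunk = { 1, 4, 4, 4, 9 };
        int[] rowMapping = { 3, 0, 2, 4, 1 };  // inverted index: sorted pos -> real row id
        BitSet bitSet = new BitSet(sortedChunk.length);
        bitSet.flip(0, sortedChunk.length);    // exclude filter starts all-true
        int start = firstIndexOf(sortedChunk, 4);
        for (int j = start; j >= 0 && j < sortedChunk.length && sortedChunk[j] == 4; j++) {
          bitSet.flip(rowMapping[j]);          // clear the real row id of each match
        }
        System.out.println(bitSet);            // {1, 3}
      }
    }
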

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/FilterExecuter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/FilterExecuter.java b/core/src/main/java/org/carbondata/scan/filter/executer/FilterExecuter.java
deleted file mode 100644
index 42f913b..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/FilterExecuter.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import java.util.BitSet;
-
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-public interface FilterExecuter {
-
-  /**
-   * API will apply filter based on resolver instance
-   *
-   * @return
-   * @throws FilterUnsupportedException
-   */
-  BitSet applyFilter(BlocksChunkHolder blocksChunkHolder) throws FilterUnsupportedException;
-
-  /**
-   * API will verify whether the block can be shortlisted based on block
-   * max and min key.
-   *
-   * @param blockMaxValue maximum value of the block
-   * @param blockMinValue minimum value of the block
-   * @return BitSet
-   */
-  BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue);
-}
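
isScanRequired is the block-pruning hook of the interface above: a block may be skipped when every filter key falls outside its [min, max] range. A minimal model of that check for a single column, using the unsigned byte ordering the executers rely on:

    import java.util.BitSet;

    public class MinMaxPruneDemo {
      static int compareUnsigned(byte[] a, byte[] b) {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
          int d = (a[i] & 0xFF) - (b[i] & 0xFF);
          if (d != 0) return d;
        }
        return a.length - b.length;
      }

      // Bit 0 set => the block may contain a match and must be scanned.
      static BitSet isScanRequired(byte[] blockMax, byte[] blockMin, byte[][] filterKeys) {
        BitSet bitSet = new BitSet(1);
        for (byte[] key : filterKeys) {
          if (compareUnsigned(key, blockMin) >= 0 && compareUnsigned(key, blockMax) <= 0) {
            bitSet.set(0);
            break;
          }
        }
        return bitSet;
      }

      public static void main(String[] args) {
        byte[][] filterKeys = { { 7 } };
        System.out.println(isScanRequired(new byte[] { 9 }, new byte[] { 5 }, filterKeys)); // {0}
        System.out.println(isScanRequired(new byte[] { 4 }, new byte[] { 1 }, filterKeys)); // {}
      }
    }
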


[20/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/datachunk/DataChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/datachunk/DataChunk.java b/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/datachunk/DataChunk.java
deleted file mode 100644
index dbbed6d..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/datachunk/DataChunk.java
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.blocklet.datachunk;
-
-import java.io.Serializable;
-import java.util.List;
-
-import org.carbondata.core.carbon.metadata.blocklet.compressor.ChunkCompressorMeta;
-import org.carbondata.core.carbon.metadata.blocklet.sort.SortState;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.metadata.ValueEncoderMeta;
-
-/**
- * Class holds the information about the data chunk metadata
- */
-public class DataChunk implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * the compression meta data of a chunk
-   */
-  private ChunkCompressorMeta chunkCompressionMeta;
-
-  /**
-   * whether this chunk is a row chunk or column chunk
-   */
-  private boolean isRowMajor;
-
-  /**
-   * the column IDs in this chunk; there will be at least
-   * one column ID for columnar format and many column IDs for
-   * row-major format
-   */
-  private List<Integer> columnUniqueIdList;
-
-  /**
-   * Offset of data page
-   */
-  private long dataPageOffset;
-
-  /**
-   * length of data page
-   */
-  private int dataPageLength;
-
-  /**
-   * information about presence of values in each row of this column chunk
-   */
-  private transient PresenceMeta nullValueIndexForColumn;
-
-  /**
-   * offset of row id page, only if encoded using inverted index
-   */
-  private long rowIdPageOffset;
-
-  /**
-   * length of row id page, only if encoded using inverted index
-   */
-  private int rowIdPageLength;
-
-  /**
-   * offset of rle page, only if RLE coded.
-   */
-  private long rlePageOffset;
-
-  /**
-   * length of rle page, only if RLE coded.
-   */
-  private int rlePageLength;
-
-  /**
-   * whether RLE is applied in the data chunk
-   */
-  private boolean isRleApplied;
-
-  /**
-   * whether the column is a no-dictionary column; applicable only to dimension columns
-   */
-  private boolean isNoDictonaryColumn;
-
-  /**
-   * sorting type selected for the chunk
-   */
-  private SortState sortState;
-
-  /**
-   * the list of encodings overridden at node level
-   */
-  private List<Encoding> encodingList;
-
-  /**
-   * value encoder meta which holds the information
-   * about max, min, decimal length and type
-   */
-  private List<ValueEncoderMeta> valueEncoderMetaList;
-
-  /**
-   * @return the chunkCompressionMeta
-   */
-  public ChunkCompressorMeta getChunkCompressionMeta() {
-    return chunkCompressionMeta;
-  }
-
-  /**
-   * @param chunkCompressionMeta the chunkCompressionMeta to set
-   */
-  public void setChunkCompressionMeta(ChunkCompressorMeta chunkCompressionMeta) {
-    this.chunkCompressionMeta = chunkCompressionMeta;
-  }
-
-  /**
-   * @return the isRowMajor
-   */
-  public boolean isRowMajor() {
-    return isRowMajor;
-  }
-
-  /**
-   * @param isRowMajor the isRowMajor to set
-   */
-  public void setRowMajor(boolean isRowMajor) {
-    this.isRowMajor = isRowMajor;
-  }
-
-  /**
-   * @return the columnUniqueIdList
-   */
-  public List<Integer> getColumnUniqueIdList() {
-    return columnUniqueIdList;
-  }
-
-  /**
-   * @param columnUniqueIdList the columnUniqueIdList to set
-   */
-  public void setColumnUniqueIdList(List<Integer> columnUniqueIdList) {
-    this.columnUniqueIdList = columnUniqueIdList;
-  }
-
-  /**
-   * @return the dataPageOffset
-   */
-  public long getDataPageOffset() {
-    return dataPageOffset;
-  }
-
-  /**
-   * @param dataPageOffset the dataPageOffset to set
-   */
-  public void setDataPageOffset(long dataPageOffset) {
-    this.dataPageOffset = dataPageOffset;
-  }
-
-  /**
-   * @return the dataPageLength
-   */
-  public int getDataPageLength() {
-    return dataPageLength;
-  }
-
-  /**
-   * @param dataPageLength the dataPageLength to set
-   */
-  public void setDataPageLength(int dataPageLength) {
-    this.dataPageLength = dataPageLength;
-  }
-
-  /**
-   * @return the nullValueIndexForColumn
-   */
-  public PresenceMeta getNullValueIndexForColumn() {
-    return nullValueIndexForColumn;
-  }
-
-  /**
-   * @param nullValueIndexForColumn the nullValueIndexForColumn to set
-   */
-  public void setNullValueIndexForColumn(PresenceMeta nullValueIndexForColumn) {
-    this.nullValueIndexForColumn = nullValueIndexForColumn;
-  }
-
-  /**
-   * @return the rowIdPageOffset
-   */
-  public long getRowIdPageOffset() {
-    return rowIdPageOffset;
-  }
-
-  /**
-   * @param rowIdPageOffset the rowIdPageOffset to set
-   */
-  public void setRowIdPageOffset(long rowIdPageOffset) {
-    this.rowIdPageOffset = rowIdPageOffset;
-  }
-
-  /**
-   * @return the rowIdPageLength
-   */
-  public int getRowIdPageLength() {
-    return rowIdPageLength;
-  }
-
-  /**
-   * @param rowIdPageLength the rowIdPageLength to set
-   */
-  public void setRowIdPageLength(int rowIdPageLength) {
-    this.rowIdPageLength = rowIdPageLength;
-  }
-
-  /**
-   * @return the rlePageOffset
-   */
-  public long getRlePageOffset() {
-    return rlePageOffset;
-  }
-
-  /**
-   * @param rlePageOffset the rlePageOffset to set
-   */
-  public void setRlePageOffset(long rlePageOffset) {
-    this.rlePageOffset = rlePageOffset;
-  }
-
-  /**
-   * @return the rlePageLength
-   */
-  public int getRlePageLength() {
-    return rlePageLength;
-  }
-
-  /**
-   * @param rlePageLength the rlePageLength to set
-   */
-  public void setRlePageLength(int rlePageLength) {
-    this.rlePageLength = rlePageLength;
-  }
-
-  /**
-   * @return the isRleApplied
-   */
-  public boolean isRleApplied() {
-    return isRleApplied;
-  }
-
-  /**
-   * @param isRleApplied the isRleApplied to set
-   */
-  public void setRleApplied(boolean isRleApplied) {
-    this.isRleApplied = isRleApplied;
-  }
-
-  /**
-   * @return the isNoDictonaryColumn
-   */
-  public boolean isNoDictonaryColumn() {
-    return isNoDictonaryColumn;
-  }
-
-  /**
-   * @param isNoDictonaryColumn the isNoDictonaryColumn to set
-   */
-  public void setNoDictonaryColumn(boolean isNoDictonaryColumn) {
-    this.isNoDictonaryColumn = isNoDictonaryColumn;
-  }
-
-  /**
-   * @return the sortState
-   */
-  public SortState getSortState() {
-    return sortState;
-  }
-
-  /**
-   * @param sortState the sortState to set
-   */
-  public void setSortState(SortState sortState) {
-    this.sortState = sortState;
-  }
-
-  /**
-   * @return the encoderList
-   */
-  public List<Encoding> getEncodingList() {
-    return encodingList;
-  }
-
-  /**
-   * @param encoderList the encoderList to set
-   */
-  public void setEncoderList(List<Encoding> encodingList) {
-    this.encodingList = encodingList;
-  }
-
-  /**
-   * @return the valueEncoderMeta
-   */
-  public List<ValueEncoderMeta> getValueEncoderMeta() {
-    return valueEncoderMetaList;
-  }
-
-  /**
-   * @param valueEncoderMeta the valueEncoderMeta to set
-   */
-  public void setValueEncoderMeta(List<ValueEncoderMeta> valueEncoderMetaList) {
-    this.valueEncoderMetaList = valueEncoderMetaList;
-  }
-
-}

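The offset/length pairs recorded in DataChunk are sufficient to locate each page in
a carbon data file: a reader seeks to the stored offset and reads exactly the stored
length. A minimal sketch using only java.io follows; the file path and the chunk
values are assumed inputs, and decompression/decoding of the page bytes is out of
scope here.

import java.io.IOException;
import java.io.RandomAccessFile;

// Hypothetical page reader resolving one data page from its offset/length metadata.
public final class DataPageReaderSketch {

  public static byte[] readDataPage(String filePath, long dataPageOffset,
      int dataPageLength) throws IOException {
    try (RandomAccessFile file = new RandomAccessFile(filePath, "r")) {
      byte[] page = new byte[dataPageLength];
      file.seek(dataPageOffset);  // jump straight to the page
      file.readFully(page);       // read exactly dataPageLength bytes
      return page;                // bytes are still compressed/encoded at this point
    }
  }
}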
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/datachunk/PresenceMeta.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/datachunk/PresenceMeta.java b/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/datachunk/PresenceMeta.java
deleted file mode 100644
index 0726c1e..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/datachunk/PresenceMeta.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.carbon.metadata.blocklet.datachunk;
-
-import java.util.BitSet;
-
-/**
- * information about presence of values in each row of the column chunk
- */
-public class PresenceMeta {
-
-  /**
-   * if true, ones in the bit stream represent presence; otherwise they represent absence
-   */
-  private boolean representNullValues;
-
-  /**
-   * Compressed bit stream representing the presence of null values
-   */
-  private BitSet bitSet;
-
-  /**
-   * @return the representNullValues
-   */
-  public boolean isRepresentNullValues() {
-    return representNullValues;
-  }
-
-  /**
-   * @param representNullValues the representNullValues to set
-   */
-  public void setRepresentNullValues(boolean representNullValues) {
-    this.representNullValues = representNullValues;
-  }
-
-  /**
-   * @return the bitSet
-   */
-  public BitSet getBitSet() {
-    return bitSet;
-  }
-
-  /**
-   * @param bitSet the bitSet to set
-   */
-  public void setBitSet(BitSet bitSet) {
-    this.bitSet = bitSet;
-  }
-}

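Given the representNullValues flag above, a reader must branch when testing a row:
under one reading of the field comment, a set bit marks a null when the flag is true
and marks a non-null otherwise. A hedged sketch of that lookup; the interpretation
is an assumption drawn from the comment, not from other code.

import java.util.BitSet;

// Hypothetical null test over a presence bitmap whose meaning is invertible.
public final class PresenceCheckSketch {

  // Returns true when row `rowId` holds a null value for this column chunk.
  public static boolean isNull(BitSet bitSet, boolean representNullValues, int rowId) {
    // Flag true: set bit = null present. Flag false: set bit = value present.
    return representNullValues ? bitSet.get(rowId) : !bitSet.get(rowId);
  }
}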
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/index/BlockletBTreeIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/index/BlockletBTreeIndex.java b/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/index/BlockletBTreeIndex.java
deleted file mode 100644
index ed0826e..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/index/BlockletBTreeIndex.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.blocklet.index;
-
-import java.io.Serializable;
-
-/**
- * Class holds the information about the start and end key of one blocklet
- */
-public class BlockletBTreeIndex implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = 6116185464700853045L;
-
-  /**
-   * Bit-packed start key of one blocklet
-   */
-  private byte[] startKey;
-
-  /**
-   * Bit-packed end key of one blocklet
-   */
-  private byte[] endKey;
-
-  public BlockletBTreeIndex() {
-  }
-
-  public BlockletBTreeIndex(byte[] startKey, byte[] endKey) {
-    this.startKey = startKey;
-    this.endKey = endKey;
-  }
-
-  /**
-   * @return the startKey
-   */
-  public byte[] getStartKey() {
-    return startKey;
-  }
-
-  /**
-   * @param startKey the startKey to set
-   */
-  public void setStartKey(byte[] startKey) {
-    this.startKey = startKey;
-  }
-
-  /**
-   * @return the endKey
-   */
-  public byte[] getEndKey() {
-    return endKey;
-  }
-
-  /**
-   * @param endKey the endKey to set
-   */
-  public void setEndKey(byte[] endKey) {
-    this.endKey = endKey;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/index/BlockletIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/index/BlockletIndex.java b/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/index/BlockletIndex.java
deleted file mode 100644
index a396795..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/index/BlockletIndex.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.blocklet.index;
-
-import java.io.Serializable;
-
-/**
- * Persisted index of all blocklets in one file
- */
-public class BlockletIndex implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * btree index holding the start and end key of the blocklet
-   */
-  private BlockletBTreeIndex btreeIndex;
-
-  /**
-   * min and max index of all columns of the blocklet
-   */
-  private BlockletMinMaxIndex minMaxIndex;
-
-  public BlockletIndex() {
-  }
-
-  public BlockletIndex(BlockletBTreeIndex btree, BlockletMinMaxIndex minmax) {
-    this.btreeIndex = btree;
-    this.minMaxIndex = minmax;
-  }
-
-  /**
-   * @return the btreeIndex
-   */
-  public BlockletBTreeIndex getBtreeIndex() {
-    return btreeIndex;
-  }
-
-  /**
-   * @param btreeIndex the btreeIndex to set
-   */
-  public void setBtreeIndex(BlockletBTreeIndex btreeIndex) {
-    this.btreeIndex = btreeIndex;
-  }
-
-  /**
-   * @return the minMaxIndex
-   */
-  public BlockletMinMaxIndex getMinMaxIndex() {
-    return minMaxIndex;
-  }
-
-  /**
-   * @param minMaxIndex the minMaxIndex to set
-   */
-  public void setMinMaxIndex(BlockletMinMaxIndex minMaxIndex) {
-    this.minMaxIndex = minMaxIndex;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/index/BlockletMinMaxIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/index/BlockletMinMaxIndex.java b/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/index/BlockletMinMaxIndex.java
deleted file mode 100644
index 822cd5b..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/index/BlockletMinMaxIndex.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.blocklet.index;
-
-import java.io.Serializable;
-import java.nio.ByteBuffer;
-import java.util.List;
-
-/**
- * Below class holds the max and min values of all the columns in a blocklet
- */
-public class BlockletMinMaxIndex implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = -4311405145501302895L;
-
-  /**
-   * Bit-packed min value of all columns of one blocklet
-   */
-  private byte[][] minValues;
-
-  /**
-   * Bit-packed max value of all columns of one blocklet
-   */
-  private byte[][] maxValues;
-
-  public BlockletMinMaxIndex() {
-  }
-
-  public BlockletMinMaxIndex(List<ByteBuffer> minValues, List<ByteBuffer> maxValues) {
-    this.minValues = new byte[minValues.size()][];
-    this.maxValues = new byte[maxValues.size()][];
-    for (int i = 0; i < minValues.size(); i++) {
-      this.minValues[i] = minValues.get(i).array();
-      this.maxValues[i] = maxValues.get(i).array();
-    }
-  }
-
-  /**
-   * @return the minValues
-   */
-  public byte[][] getMinValues() {
-    return minValues;
-  }
-
-  /**
-   * @param minValues the minValues to set
-   */
-  public void setMinValues(byte[][] minValues) {
-    this.minValues = minValues;
-  }
-
-  /**
-   * @return the maxValues
-   */
-  public byte[][] getMaxValues() {
-    return maxValues;
-  }
-
-  /**
-   * @param maxValues the maxValues to set
-   */
-  public void setMaxValues(byte[][] maxValues) {
-    this.maxValues = maxValues;
-  }
-
-}

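Min/max pairs like these drive blocklet skipping: for an equality filter, a blocklet
can only contain a match when the filter value lies inside [min, max] for that
column. A hedged sketch of the containment test; the unsigned byte-wise ordering is
an assumption, and java.util.Arrays.compareUnsigned requires Java 9+.

import java.util.Arrays;

// Hypothetical blocklet-skip test for an equality filter on one column.
public final class MinMaxPruneSketch {

  // The blocklet may contain filterValue only if min <= filterValue <= max.
  public static boolean mayContain(byte[][] minValues, byte[][] maxValues,
      int columnIndex, byte[] filterValue) {
    return Arrays.compareUnsigned(minValues[columnIndex], filterValue) <= 0
        && Arrays.compareUnsigned(maxValues[columnIndex], filterValue) >= 0;
  }
}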
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/sort/SortState.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/sort/SortState.java b/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/sort/SortState.java
deleted file mode 100644
index 6e6d683..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/sort/SortState.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.blocklet.sort;
-
-/**
- * Enum for sort type information
- */
-public enum SortState {
-
-  /**
-   * column is not sorted
-   */
-  SORT_NONE,
-
-  /**
-   * data from source was already in sorted order
-   */
-  SORT_NATIVE,
-
-  /**
-   * data from source was not sorted, so data is explicitly sorted
-   */
-  SORT_EXPLICT;
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/converter/SchemaConverter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/converter/SchemaConverter.java b/core/src/main/java/org/carbondata/core/carbon/metadata/converter/SchemaConverter.java
deleted file mode 100644
index bb10258..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/converter/SchemaConverter.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.converter;
-
-import org.carbondata.core.carbon.metadata.schema.SchemaEvolution;
-import org.carbondata.core.carbon.metadata.schema.SchemaEvolutionEntry;
-import org.carbondata.core.carbon.metadata.schema.table.TableInfo;
-import org.carbondata.core.carbon.metadata.schema.table.TableSchema;
-import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
-
-/**
- * Converter interface to be implemented for converting between external and carbon schema
- */
-public interface SchemaConverter {
-  /**
-   * @param wrapperSchemaEvolutionEntry
-   * @return
-   */
-  org.carbondata.format.SchemaEvolutionEntry fromWrapperToExternalSchemaEvolutionEntry(
-      SchemaEvolutionEntry wrapperSchemaEvolutionEntry);
-
-  /**
-   * @param wrapperSchemaEvolution
-   * @return
-   */
-  org.carbondata.format.SchemaEvolution fromWrapperToExternalSchemaEvolution(
-      SchemaEvolution wrapperSchemaEvolution);
-
-  /**
-   * @param wrapperColumnSchema
-   * @return
-   */
-  org.carbondata.format.ColumnSchema fromWrapperToExternalColumnSchema(
-      ColumnSchema wrapperColumnSchema);
-
-  /**
-   * @param wrapperTableSchema
-   * @return
-   */
-  org.carbondata.format.TableSchema fromWrapperToExternalTableSchema(
-      TableSchema wrapperTableSchema);
-
-  /**
-   * @param wrapperTableInfo
-   * @param dbName
-   * @param tableName
-   * @return
-   */
-  org.carbondata.format.TableInfo fromWrapperToExternalTableInfo(TableInfo wrapperTableInfo,
-      String dbName, String tableName);
-
-  /**
-   * @param externalSchemaEvolutionEntry
-   * @return
-   */
-  SchemaEvolutionEntry fromExternalToWrapperSchemaEvolutionEntry(
-      org.carbondata.format.SchemaEvolutionEntry externalSchemaEvolutionEntry);
-
-  /**
-   * @param externalSchemaEvolution
-   * @return
-   */
-  SchemaEvolution fromExternalToWrapperSchemaEvolution(
-      org.carbondata.format.SchemaEvolution externalSchemaEvolution);
-
-  /**
-   * @param externalColumnSchema
-   * @return
-   */
-  ColumnSchema fromExternalToWrapperColumnSchema(
-      org.carbondata.format.ColumnSchema externalColumnSchema);
-
-  /**
-   * @param externalTableSchema
-   * @param tableName
-   * @return
-   */
-  TableSchema fromExternalToWrapperTableSchema(
-      org.carbondata.format.TableSchema externalTableSchema, String tableName);
-
-  /**
-   * @param externalTableInfo
-   * @param dbName
-   * @param tableName
-   * @return
-   */
-  TableInfo fromExternalToWrapperTableInfo(org.carbondata.format.TableInfo externalTableInfo,
-      String dbName, String tableName, String storePath);
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/converter/ThriftWrapperSchemaConverterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/converter/ThriftWrapperSchemaConverterImpl.java b/core/src/main/java/org/carbondata/core/carbon/metadata/converter/ThriftWrapperSchemaConverterImpl.java
deleted file mode 100644
index c33c008..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/converter/ThriftWrapperSchemaConverterImpl.java
+++ /dev/null
@@ -1,384 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.converter;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.carbon.metadata.schema.SchemaEvolution;
-import org.carbondata.core.carbon.metadata.schema.SchemaEvolutionEntry;
-import org.carbondata.core.carbon.metadata.schema.table.TableInfo;
-import org.carbondata.core.carbon.metadata.schema.table.TableSchema;
-import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
-
-/**
- * Thrift schema to carbon schema converter and vice versa
- */
-public class ThriftWrapperSchemaConverterImpl implements SchemaConverter {
-
-  /* (non-Javadoc)
-   * Converts from wrapper to thrift schema evolution entry
-   */
-  @Override
-  public org.carbondata.format.SchemaEvolutionEntry fromWrapperToExternalSchemaEvolutionEntry(
-      SchemaEvolutionEntry wrapperSchemaEvolutionEntry) {
-    org.carbondata.format.SchemaEvolutionEntry thriftSchemaEvolutionEntry =
-        new org.carbondata.format.SchemaEvolutionEntry(wrapperSchemaEvolutionEntry.getTimeStamp());
-
-    List<org.carbondata.format.ColumnSchema> thriftAddedColumns =
-        new ArrayList<org.carbondata.format.ColumnSchema>();
-    for (ColumnSchema wrapperColumnSchema : wrapperSchemaEvolutionEntry.getAdded()) {
-      thriftAddedColumns.add(fromWrapperToExternalColumnSchema(wrapperColumnSchema));
-    }
-
-    List<org.carbondata.format.ColumnSchema> thriftRemovedColumns =
-        new ArrayList<org.carbondata.format.ColumnSchema>();
-    for (ColumnSchema wrapperColumnSchema : wrapperSchemaEvolutionEntry.getRemoved()) {
-      thriftRemovedColumns.add(fromWrapperToExternalColumnSchema(wrapperColumnSchema));
-    }
-
-    thriftSchemaEvolutionEntry.setAdded(thriftAddedColumns);
-    thriftSchemaEvolutionEntry.setRemoved(thriftRemovedColumns);
-    return thriftSchemaEvolutionEntry;
-  }
-
-  /* (non-Javadoc)
-   * converts from wrapper to thrift schema evolution
-   */
-  @Override public org.carbondata.format.SchemaEvolution fromWrapperToExternalSchemaEvolution(
-      SchemaEvolution wrapperSchemaEvolution) {
-
-    List<org.carbondata.format.SchemaEvolutionEntry> thriftSchemaEvolEntryList =
-        new ArrayList<org.carbondata.format.SchemaEvolutionEntry>();
-    for (SchemaEvolutionEntry schemaEvolutionEntry : wrapperSchemaEvolution
-        .getSchemaEvolutionEntryList()) {
-      thriftSchemaEvolEntryList
-          .add(fromWrapperToExternalSchemaEvolutionEntry(schemaEvolutionEntry));
-    }
-    return new org.carbondata.format.SchemaEvolution(thriftSchemaEvolEntryList);
-  }
-
-
-  /**
-   * converts from wrapper to external encoding
-   *
-   * @param encoder
-   * @return
-   */
-  private org.carbondata.format.Encoding fromWrapperToExternalEncoding(Encoding encoder) {
-
-    if (null == encoder) {
-      return null;
-    }
-
-    switch (encoder) {
-      case DICTIONARY:
-        return org.carbondata.format.Encoding.DICTIONARY;
-      case DELTA:
-        return org.carbondata.format.Encoding.DELTA;
-      case RLE:
-        return org.carbondata.format.Encoding.RLE;
-      case INVERTED_INDEX:
-        return org.carbondata.format.Encoding.INVERTED_INDEX;
-      case BIT_PACKED:
-        return org.carbondata.format.Encoding.BIT_PACKED;
-      case DIRECT_DICTIONARY:
-        return org.carbondata.format.Encoding.DIRECT_DICTIONARY;
-      default:
-        return org.carbondata.format.Encoding.DICTIONARY;
-    }
-  }
-
-  /**
-   * convert from wrapper to external data type
-   *
-   * @param dataType
-   * @return
-   */
-  private org.carbondata.format.DataType fromWrapperToExternalDataType(DataType dataType) {
-
-    if (null == dataType) {
-      return null;
-    }
-    switch (dataType) {
-      case STRING:
-        return org.carbondata.format.DataType.STRING;
-      case INT:
-        return org.carbondata.format.DataType.INT;
-      case SHORT:
-        return org.carbondata.format.DataType.SHORT;
-      case LONG:
-        return org.carbondata.format.DataType.LONG;
-      case DOUBLE:
-        return org.carbondata.format.DataType.DOUBLE;
-      case DECIMAL:
-        return org.carbondata.format.DataType.DECIMAL;
-      case TIMESTAMP:
-        return org.carbondata.format.DataType.TIMESTAMP;
-      case ARRAY:
-        return org.carbondata.format.DataType.ARRAY;
-      case STRUCT:
-        return org.carbondata.format.DataType.STRUCT;
-      default:
-        return org.carbondata.format.DataType.STRING;
-    }
-  }
-
-  /* (non-Javadoc)
-   * convert from wrapper to external column schema
-   */
-  @Override public org.carbondata.format.ColumnSchema fromWrapperToExternalColumnSchema(
-      ColumnSchema wrapperColumnSchema) {
-
-    List<org.carbondata.format.Encoding> encoders = new ArrayList<org.carbondata.format.Encoding>();
-    for (Encoding encoder : wrapperColumnSchema.getEncodingList()) {
-      encoders.add(fromWrapperToExternalEncoding(encoder));
-    }
-    org.carbondata.format.ColumnSchema thriftColumnSchema = new org.carbondata.format.ColumnSchema(
-        fromWrapperToExternalDataType(wrapperColumnSchema.getDataType()),
-        wrapperColumnSchema.getColumnName(), wrapperColumnSchema.getColumnUniqueId(),
-        wrapperColumnSchema.isColumnar(), encoders, wrapperColumnSchema.isDimensionColumn());
-    thriftColumnSchema.setColumn_group_id(wrapperColumnSchema.getColumnGroupId());
-    thriftColumnSchema.setScale(wrapperColumnSchema.getScale());
-    thriftColumnSchema.setPrecision(wrapperColumnSchema.getPrecision());
-    thriftColumnSchema.setNum_child(wrapperColumnSchema.getNumberOfChild());
-    thriftColumnSchema.setDefault_value(wrapperColumnSchema.getDefaultValue());
-    thriftColumnSchema.setColumnProperties(wrapperColumnSchema.getColumnProperties());
-    thriftColumnSchema.setInvisible(wrapperColumnSchema.isInvisible());
-    thriftColumnSchema.setColumnReferenceId(wrapperColumnSchema.getColumnReferenceId());
-    return thriftColumnSchema;
-  }
-
-  /* (non-Javadoc)
-   * convert from wrapper to external tableschema
-   */
-  @Override public org.carbondata.format.TableSchema fromWrapperToExternalTableSchema(
-      TableSchema wrapperTableSchema) {
-
-    List<org.carbondata.format.ColumnSchema> thriftColumnSchema =
-        new ArrayList<org.carbondata.format.ColumnSchema>();
-    for (ColumnSchema wrapperColumnSchema : wrapperTableSchema.getListOfColumns()) {
-      thriftColumnSchema.add(fromWrapperToExternalColumnSchema(wrapperColumnSchema));
-    }
-    org.carbondata.format.SchemaEvolution schemaEvolution =
-        fromWrapperToExternalSchemaEvolution(wrapperTableSchema.getSchemaEvalution());
-    return new org.carbondata.format.TableSchema(wrapperTableSchema.getTableId(),
-        thriftColumnSchema, schemaEvolution);
-  }
-
-  /* (non-Javadoc)
-   * convert from wrapper to external tableinfo
-   */
-  @Override public org.carbondata.format.TableInfo fromWrapperToExternalTableInfo(
-      TableInfo wrapperTableInfo, String dbName, String tableName) {
-
-    org.carbondata.format.TableSchema thriftFactTable =
-        fromWrapperToExternalTableSchema(wrapperTableInfo.getFactTable());
-    List<org.carbondata.format.TableSchema> thriftAggTables =
-        new ArrayList<org.carbondata.format.TableSchema>();
-    for (TableSchema wrapperAggTableSchema : wrapperTableInfo.getAggregateTableList()) {
-      thriftAggTables.add(fromWrapperToExternalTableSchema(wrapperAggTableSchema));
-    }
-    return new org.carbondata.format.TableInfo(thriftFactTable, thriftAggTables);
-  }
-
-  /* (non-Javadoc)
-   * convert from external to wrapper schema evolution entry
-   */
-  @Override public SchemaEvolutionEntry fromExternalToWrapperSchemaEvolutionEntry(
-      org.carbondata.format.SchemaEvolutionEntry externalSchemaEvolutionEntry) {
-
-    SchemaEvolutionEntry wrapperSchemaEvolutionEntry = new SchemaEvolutionEntry();
-    wrapperSchemaEvolutionEntry.setTimeStamp(externalSchemaEvolutionEntry.getTime_stamp());
-
-    List<ColumnSchema> wrapperAddedColumns = new ArrayList<ColumnSchema>();
-    if (null != externalSchemaEvolutionEntry.getAdded()) {
-      for (org.carbondata.format.ColumnSchema externalColumnSchema : externalSchemaEvolutionEntry
-          .getAdded()) {
-        wrapperAddedColumns.add(fromExternalToWrapperColumnSchema(externalColumnSchema));
-      }
-    }
-    List<ColumnSchema> wrapperRemovedColumns = new ArrayList<ColumnSchema>();
-    if (null != externalSchemaEvolutionEntry.getRemoved()) {
-      for (org.carbondata.format.ColumnSchema externalColumnSchema : externalSchemaEvolutionEntry
-          .getRemoved()) {
-        wrapperRemovedColumns.add(fromExternalToWrapperColumnSchema(externalColumnSchema));
-      }
-    }
-
-    wrapperSchemaEvolutionEntry.setAdded(wrapperAddedColumns);
-    wrapperSchemaEvolutionEntry.setRemoved(wrapperRemovedColumns);
-    return wrapperSchemaEvolutionEntry;
-
-  }
-
-  /* (non-Javadoc)
-   * convert from external to wrapper schema evolution
-   */
-  @Override public SchemaEvolution fromExternalToWrapperSchemaEvolution(
-      org.carbondata.format.SchemaEvolution externalSchemaEvolution) {
-    List<SchemaEvolutionEntry> wrapperSchemaEvolEntryList = new ArrayList<SchemaEvolutionEntry>();
-    for (org.carbondata.format.SchemaEvolutionEntry schemaEvolutionEntry : externalSchemaEvolution
-        .getSchema_evolution_history()) {
-      wrapperSchemaEvolEntryList
-          .add(fromExternalToWrapperSchemaEvolutionEntry(schemaEvolutionEntry));
-    }
-    SchemaEvolution wrapperSchemaEvolution = new SchemaEvolution();
-    wrapperSchemaEvolution.setSchemaEvolutionEntryList(wrapperSchemaEvolEntryList);
-    return wrapperSchemaEvolution;
-  }
-
-  /**
-   * convert from external to wrapper encoding
-   *
-   * @param encoder
-   * @return
-   */
-  private Encoding fromExternalToWrapperEncoding(org.carbondata.format.Encoding encoder) {
-    if (null == encoder) {
-      return null;
-    }
-    switch (encoder) {
-      case DICTIONARY:
-        return Encoding.DICTIONARY;
-      case DELTA:
-        return Encoding.DELTA;
-      case RLE:
-        return Encoding.RLE;
-      case INVERTED_INDEX:
-        return Encoding.INVERTED_INDEX;
-      case BIT_PACKED:
-        return Encoding.BIT_PACKED;
-      case DIRECT_DICTIONARY:
-        return Encoding.DIRECT_DICTIONARY;
-      default:
-        return Encoding.DICTIONARY;
-    }
-  }
-
-  /**
-   * convert from external to wrapper data type
-   *
-   * @param dataType
-   * @return
-   */
-  private DataType fromExternalToWrapperDataType(org.carbondata.format.DataType dataType) {
-    if (null == dataType) {
-      return null;
-    }
-    switch (dataType) {
-      case STRING:
-        return DataType.STRING;
-      case INT:
-        return DataType.INT;
-      case SHORT:
-        return DataType.SHORT;
-      case LONG:
-        return DataType.LONG;
-      case DOUBLE:
-        return DataType.DOUBLE;
-      case DECIMAL:
-        return DataType.DECIMAL;
-      case TIMESTAMP:
-        return DataType.TIMESTAMP;
-      case ARRAY:
-        return DataType.ARRAY;
-      case STRUCT:
-        return DataType.STRUCT;
-      default:
-        return DataType.STRING;
-    }
-  }
-
-  /* (non-Javadoc)
-   * convert from external to wrapper columnschema
-   */
-  @Override public ColumnSchema fromExternalToWrapperColumnSchema(
-      org.carbondata.format.ColumnSchema externalColumnSchema) {
-    ColumnSchema wrapperColumnSchema = new ColumnSchema();
-    wrapperColumnSchema.setColumnUniqueId(externalColumnSchema.getColumn_id());
-    wrapperColumnSchema.setColumnName(externalColumnSchema.getColumn_name());
-    wrapperColumnSchema.setColumnar(externalColumnSchema.isColumnar());
-    wrapperColumnSchema.setDataType(fromExternalToWrapperDataType(externalColumnSchema.data_type));
-    wrapperColumnSchema.setDimensionColumn(externalColumnSchema.isDimension());
-    List<Encoding> encoders = new ArrayList<Encoding>();
-    for (org.carbondata.format.Encoding encoder : externalColumnSchema.getEncoders()) {
-      encoders.add(fromExternalToWrapperEncoding(encoder));
-    }
-    wrapperColumnSchema.setEncodingList(encoders);
-    wrapperColumnSchema.setNumberOfChild(externalColumnSchema.getNum_child());
-    wrapperColumnSchema.setPrecision(externalColumnSchema.getPrecision());
-    wrapperColumnSchema.setColumnGroup(externalColumnSchema.getColumn_group_id());
-    wrapperColumnSchema.setScale(externalColumnSchema.getScale());
-    wrapperColumnSchema.setDefaultValue(externalColumnSchema.getDefault_value());
-    wrapperColumnSchema.setAggregateFunction(externalColumnSchema.getAggregate_function());
-    wrapperColumnSchema.setColumnProperties(externalColumnSchema.getColumnProperties());
-    wrapperColumnSchema.setInvisible(externalColumnSchema.isInvisible());
-    wrapperColumnSchema.setColumnReferenceId(externalColumnSchema.getColumnReferenceId());
-    return wrapperColumnSchema;
-  }
-
-  /* (non-Javadoc)
-   * convert from external to wrapper tableschema
-   */
-  @Override public TableSchema fromExternalToWrapperTableSchema(
-      org.carbondata.format.TableSchema externalTableSchema, String tableName) {
-    TableSchema wrapperTableSchema = new TableSchema();
-    wrapperTableSchema.setTableId(externalTableSchema.getTable_id());
-    wrapperTableSchema.setTableName(tableName);
-    List<ColumnSchema> listOfColumns = new ArrayList<ColumnSchema>();
-    for (org.carbondata.format.ColumnSchema externalColumnSchema : externalTableSchema
-        .getTable_columns()) {
-      listOfColumns.add(fromExternalToWrapperColumnSchema(externalColumnSchema));
-    }
-    wrapperTableSchema.setListOfColumns(listOfColumns);
-    wrapperTableSchema.setSchemaEvalution(
-        fromExternalToWrapperSchemaEvolution(externalTableSchema.getSchema_evolution()));
-    return wrapperTableSchema;
-  }
-
-  /* (non-Javadoc)
-   * convert from external to wrapper tableinfo
-   */
-  @Override public TableInfo fromExternalToWrapperTableInfo(
-      org.carbondata.format.TableInfo externalTableInfo, String dbName, String tableName,
-      String storePath) {
-    TableInfo wrapperTableInfo = new TableInfo();
-    wrapperTableInfo.setLastUpdatedTime(
-        externalTableInfo.getFact_table().getSchema_evolution().getSchema_evolution_history().get(0)
-            .getTime_stamp());
-    wrapperTableInfo.setDatabaseName(dbName);
-    wrapperTableInfo.setTableUniqueName(dbName + "_" + tableName);
-    wrapperTableInfo.setStorePath(storePath);
-    wrapperTableInfo.setFactTable(
-        fromExternalToWrapperTableSchema(externalTableInfo.getFact_table(), tableName));
-    List<TableSchema> aggTablesList = new ArrayList<TableSchema>();
-    int index = 0;
-    for (org.carbondata.format.TableSchema aggTable : externalTableInfo.getAggregate_table_list()) {
-      aggTablesList.add(fromExternalToWrapperTableSchema(aggTable, "agg_table_" + index));
-      index++;
-    }
-    // assumes TableInfo exposes a setter for the converted aggregate table list
-    wrapperTableInfo.setAggregateTableList(aggTablesList);
-    return wrapperTableInfo;
-  }
-
-}

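The converter above is deliberately symmetric: every fromWrapperToExternal* method
has a fromExternalToWrapper* inverse, so a schema can round-trip through the Thrift
form. A hedged usage sketch against the pre-rename package names shown in this diff;
the input TableInfo and store path are assumed to come from the caller.

import org.carbondata.core.carbon.metadata.converter.SchemaConverter;
import org.carbondata.core.carbon.metadata.converter.ThriftWrapperSchemaConverterImpl;
import org.carbondata.core.carbon.metadata.schema.table.TableInfo;

// Hypothetical round-trip: wrapper schema -> Thrift schema -> wrapper schema.
public final class SchemaRoundTripSketch {

  public static TableInfo roundTrip(TableInfo wrapperInfo, String dbName,
      String tableName, String storePath) {
    SchemaConverter converter = new ThriftWrapperSchemaConverterImpl();
    org.carbondata.format.TableInfo thriftInfo =
        converter.fromWrapperToExternalTableInfo(wrapperInfo, dbName, tableName);
    return converter.fromExternalToWrapperTableInfo(thriftInfo, dbName, tableName,
        storePath);
  }
}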
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/datatype/ConvertedType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/datatype/ConvertedType.java b/core/src/main/java/org/carbondata/core/carbon/metadata/datatype/ConvertedType.java
deleted file mode 100644
index bbf71bc..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/datatype/ConvertedType.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.datatype;
-
-public enum ConvertedType {
-
-  /**
-   * a BYTE_ARRAY actually contains UTF8 encoded chars
-   */
-  UTF8,
-  /**
-   * a map is converted as an optional field containing a repeated key/value pair
-   */
-  MAP,
-  /**
-   * a key/value pair is converted into a group of two fields
-   */
-  MAP_KEY_VALUE,
-  /**
-   * a list is converted into an optional field containing a repeated field for its
-   * values
-   */
-  LIST,
-  /**
-   * an enum is converted into a binary field
-   */
-  ENUM,
-  /**
-   * A decimal value.
-   * This may be used to annotate binary or fixed primitive types. The
-   * underlying byte array stores the unscaled value encoded as two's
-   * complement using big-endian byte order (the most significant byte is the
-   * zeroth element). The value of the decimal is the value * 10^{-scale}.
-   * This must be accompanied by a (maximum) precision and a scale in the
-   * SchemaElement. The precision specifies the number of digits in the decimal
-   * and the scale stores the location of the decimal point. For example 1.23
-   * would have precision 3 (3 total digits) and scale 2 (the decimal point is
-   * 2 digits over).
-   */
-  DECIMAL,
-  /**
-   * A Date
-   * Stored as days since Unix epoch, encoded as the INT32 physical type.
-   */
-  DATE,
-  /**
-   * A time
-   * The total number of milliseconds since midnight.  The value is stored
-   * as an INT32 physical type.
-   */
-  TIME_MILLIS,
-  /**
-   * A date/time combination
-   * Date and time recorded as milliseconds since the Unix epoch.  Recorded as
-   * a physical type of INT64.
-   */
-  TIMESTAMP_MILLIS,
-
-  RESERVED,
-  /**
-   * An unsigned integer value.
-   * The number describes the maximum number of meaningful data bits in
-   * the stored value. 8, 16 and 32 bit values are stored using the
-   * INT32 physical type.  64 bit values are stored using the INT64
-   * physical type.
-   */
-  UINT_8,
-  UINT_16,
-  UINT_32,
-  UINT_64,
-  /**
-   * A signed integer value.
-   * The number describes the maximum number of meaningful data bits in
-   * the stored value. 8, 16 and 32 bit values are stored using the
-   * INT32 physical type.  64 bit values are stored using the INT64
-   * physical type.
-   */
-  INT_8,
-  INT_16,
-  INT_32,
-  INT_64,
-  /**
-   * An embedded JSON document
-   * A JSON document embedded within a single UTF8 column.
-   */
-  JSON,
-
-  /**
-   * An embedded BSON document
-   * A BSON document embedded within a single BINARY column.
-   */
-  BSON,
-
-  /**
-   * An interval of time
-   * This type annotates data stored as a FIXED_LEN_BYTE_ARRAY of length 12
-   * This data is composed of three separate little endian unsigned
-   * integers.  Each stores a component of a duration of time.  The first
-   * integer identifies the number of months associated with the duration,
-   * the second identifies the number of days associated with the duration
-   * and the third identifies the number of milliseconds associated with
-   * the provided duration.  This duration of time is independent of any
-   * particular timezone or date.
-   */
-  INTERVAL;
-}

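The DECIMAL annotation above is easiest to see with the 1.23 example from the
comment: precision 3, scale 2, and an unscaled value of 123 stored as big-endian
two's complement bytes. A small self-contained illustration with java.math follows.

import java.math.BigDecimal;
import java.math.BigInteger;

// Illustration of the DECIMAL encoding described above:
// bytes hold the unscaled value (big-endian two's complement),
// and the logical value is unscaled * 10^(-scale).
public final class DecimalEncodingSketch {

  public static byte[] encode(BigDecimal value) {
    return value.unscaledValue().toByteArray();  // big-endian two's complement
  }

  public static BigDecimal decode(byte[] bytes, int scale) {
    return new BigDecimal(new BigInteger(bytes), scale);
  }

  public static void main(String[] args) {
    BigDecimal v = new BigDecimal("1.23");  // precision 3, scale 2
    byte[] encoded = encode(v);             // unscaled value 123 -> { 0x7B }
    System.out.println(decode(encoded, 2)); // prints 1.23
  }
}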
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/datatype/DataType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/datatype/DataType.java b/core/src/main/java/org/carbondata/core/carbon/metadata/datatype/DataType.java
deleted file mode 100644
index 5fbe9cb..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/datatype/DataType.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.carbon.metadata.datatype;
-
-public enum DataType {
-
-  STRING(0),
-  DATE(1),
-  TIMESTAMP(2),
-  BOOLEAN(1),
-  SHORT(2),
-  INT(3),
-  FLOAT(4),
-  LONG(5),
-  DOUBLE(6),
-  NULL(7),
-  DECIMAL(8),
-  ARRAY(9),
-  STRUCT(10),
-  MAP(11);
-
-  private int presedenceOrder;
-
-  DataType(int value) {
-    this.presedenceOrder = value;
-  }
-
-  public int getPresedenceOrder() {
-    return presedenceOrder;
-  }
-}

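The presedenceOrder value suggests a promotion rule between types, such as keeping
the wider of two operand types by comparing orders. A hypothetical sketch of such a
rule; this use is an assumption, since the deleted enum only stores and exposes the
value.

import org.carbondata.core.carbon.metadata.datatype.DataType;

// Hypothetical promotion: keep whichever type carries the higher precedence order.
public final class DataTypePromotionSketch {

  public static DataType promote(DataType left, DataType right) {
    return left.getPresedenceOrder() >= right.getPresedenceOrder() ? left : right;
  }
}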
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/encoder/Encoding.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/encoder/Encoding.java b/core/src/main/java/org/carbondata/core/carbon/metadata/encoder/Encoding.java
deleted file mode 100644
index bc1ecaa..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/encoder/Encoding.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.encoder;
-
-/**
- * Encoding type supported in carbon
- */
-public enum Encoding {
-  DICTIONARY,
-  DELTA,
-  RLE,
-  INVERTED_INDEX,
-  BIT_PACKED,
-  DIRECT_DICTIONARY;
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/index/BlockIndexInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/index/BlockIndexInfo.java b/core/src/main/java/org/carbondata/core/carbon/metadata/index/BlockIndexInfo.java
deleted file mode 100644
index bfed3dd..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/index/BlockIndexInfo.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.index;
-
-import org.carbondata.core.carbon.metadata.blocklet.index.BlockletIndex;
-
-/**
- * Below class will be used to hold the information
- * about the block index
- */
-public class BlockIndexInfo {
-
-  /**
-   * total number of rows present in the file
-   */
-  private long numberOfRows;
-
-  /**
-   * file name
-   */
-  private String fileName;
-
-  /**
-   * offset of metadata in data file
-   */
-  private long offset;
-
-  /**
-   * to store min max and start and end key
-   */
-  private BlockletIndex blockletIndex;
-
-  /**
-   * Constructor
-   *
-   * @param numberOfRows  number of rows
-   * @param fileName      fully qualified file name
-   * @param offset        offset of the metadata in the data file
-   * @param blockletIndex blocklet index
-   */
-  public BlockIndexInfo(long numberOfRows, String fileName, long offset,
-      BlockletIndex blockletIndex) {
-    this.numberOfRows = numberOfRows;
-    this.fileName = fileName;
-    this.offset = offset;
-    this.blockletIndex = blockletIndex;
-  }
-
-  /**
-   * @return the numberOfRows
-   */
-  public long getNumberOfRows() {
-    return numberOfRows;
-  }
-
-  /**
-   * @return the fileName
-   */
-  public String getFileName() {
-    return fileName;
-  }
-
-  /**
-   * @return the offset
-   */
-  public long getOffset() {
-    return offset;
-  }
-
-  /**
-   * @return the blockletIndex
-   */
-  public BlockletIndex getBlockletIndex() {
-    return blockletIndex;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/schema/SchemaEvolution.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/SchemaEvolution.java b/core/src/main/java/org/carbondata/core/carbon/metadata/schema/SchemaEvolution.java
deleted file mode 100644
index 6ce7f5e..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/SchemaEvolution.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.schema;
-
-import java.io.Serializable;
-import java.util.List;
-
-/**
- * Persists schema restructuring information.
- */
-public class SchemaEvolution implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = 8186224567517679868L;
-
-  /**
-   * list of schema evolution entry
-   */
-  private List<SchemaEvolutionEntry> schemaEvolutionEntryList;
-
-  /**
-   * @return the schemaEvolutionEntryList
-   */
-  public List<SchemaEvolutionEntry> getSchemaEvolutionEntryList() {
-    return schemaEvolutionEntryList;
-  }
-
-  /**
-   * @param schemaEvolutionEntryList the schemaEvolutionEntryList to set
-   */
-  public void setSchemaEvolutionEntryList(List<SchemaEvolutionEntry> schemaEvolutionEntryList) {
-    this.schemaEvolutionEntryList = schemaEvolutionEntryList;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/schema/SchemaEvolutionEntry.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/SchemaEvolutionEntry.java b/core/src/main/java/org/carbondata/core/carbon/metadata/schema/SchemaEvolutionEntry.java
deleted file mode 100644
index ef06963..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/SchemaEvolutionEntry.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.schema;
-
-import java.io.Serializable;
-import java.util.List;
-
-import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
-
-/**
- * Stores the information about the schema evolution
- */
-public class SchemaEvolutionEntry implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = -7619477063676325276L;
-
-  /**
-   * time stamp of restructuring
-   */
-  private long timeStamp;
-
-  /**
-   * new columns added in restructuring
-   */
-  private List<ColumnSchema> added;
-
-  /**
-   * columns removed in restructuring
-   */
-  private List<ColumnSchema> removed;
-
-  /**
-   * @return the timeStamp
-   */
-  public long getTimeStamp() {
-    return timeStamp;
-  }
-
-  /**
-   * @param timeStamp the timeStamp to set
-   */
-  public void setTimeStamp(long timeStamp) {
-    this.timeStamp = timeStamp;
-  }
-
-  /**
-   * @return the added
-   */
-  public List<ColumnSchema> getAdded() {
-    return added;
-  }
-
-  /**
-   * @param added the added to set
-   */
-  public void setAdded(List<ColumnSchema> added) {
-    this.added = added;
-  }
-
-  /**
-   * @return the removed
-   */
-  public List<ColumnSchema> getRemoved() {
-    return removed;
-  }
-
-  /**
-   * @param removed the removed to set
-   */
-  public void setRemoved(List<ColumnSchema> removed) {
-    this.removed = removed;
-  }
-
-}

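Each entry above records one restructure as an added list and a removed list, so the
current column set can be recovered by replaying entries in timestamp order. A
hedged sketch of that replay; the helper is hypothetical, and removal via
List.removeAll assumes ColumnSchema implements a suitable equals.

import java.util.ArrayList;
import java.util.List;

import org.carbondata.core.carbon.metadata.schema.SchemaEvolution;
import org.carbondata.core.carbon.metadata.schema.SchemaEvolutionEntry;
import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;

// Hypothetical replay of schema evolution entries over an initial column list.
public final class SchemaReplaySketch {

  public static List<ColumnSchema> currentColumns(List<ColumnSchema> initial,
      SchemaEvolution evolution) {
    List<ColumnSchema> columns = new ArrayList<ColumnSchema>(initial);
    for (SchemaEvolutionEntry entry : evolution.getSchemaEvolutionEntryList()) {
      if (entry.getRemoved() != null) {
        columns.removeAll(entry.getRemoved());  // drop columns removed by this entry
      }
      if (entry.getAdded() != null) {
        columns.addAll(entry.getAdded());       // append columns added by this entry
      }
    }
    return columns;
  }
}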
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/CarbonTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/CarbonTable.java b/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/CarbonTable.java
deleted file mode 100644
index 4d16659..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/CarbonTable.java
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.carbon.metadata.schema.table;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
-import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
-
-/**
- * Mapping class for Carbon actual table
- */
-public class CarbonTable implements Serializable {
-
-  /**
-   * serialization id
-   */
-  private static final long serialVersionUID = 8696507171227156445L;
-
-  /**
-   * Absolute table identifier
-   */
-  private AbsoluteTableIdentifier absoluteTableIdentifier;
-
-  /**
-   * TableName, Dimensions list
-   */
-  private Map<String, List<CarbonDimension>> tableDimensionsMap;
-
-  /**
-   * table measures list.
-   */
-  private Map<String, List<CarbonMeasure>> tableMeasuresMap;
-
-  /**
-   * tableUniqueName
-   */
-  private String tableUniqueName;
-
-  /**
-   * Aggregate tables name
-   */
-  private List<String> aggregateTablesName;
-
-  /**
-   * metadata file path (check if it is really required)
-   */
-  private String metaDataFilepath;
-
-  /**
-   * last updated time
-   */
-  private long tableLastUpdatedTime;
-
-  public CarbonTable() {
-    this.tableDimensionsMap = new HashMap<String, List<CarbonDimension>>();
-    this.tableMeasuresMap = new HashMap<String, List<CarbonMeasure>>();
-    this.aggregateTablesName = new ArrayList<String>();
-  }
-
-  /**
-   * @param tableInfo
-   */
-  public void loadCarbonTable(TableInfo tableInfo) {
-    this.tableLastUpdatedTime = tableInfo.getLastUpdatedTime();
-    this.tableUniqueName = tableInfo.getTableUniqueName();
-    this.metaDataFilepath = tableInfo.getMetaDataFilepath();
-    //setting unique table identifier
-    CarbonTableIdentifier carbontableIdentifier =
-        new CarbonTableIdentifier(tableInfo.getDatabaseName(),
-            tableInfo.getFactTable().getTableName(), tableInfo.getFactTable().getTableId());
-    this.absoluteTableIdentifier =
-        new AbsoluteTableIdentifier(tableInfo.getStorePath(), carbontableIdentifier);
-
-    fillDimensionsAndMeasuresForTables(tableInfo.getFactTable());
-    List<TableSchema> aggregateTableList = tableInfo.getAggregateTableList();
-    for (TableSchema aggTable : aggregateTableList) {
-      this.aggregateTablesName.add(aggTable.getTableName());
-      fillDimensionsAndMeasuresForTables(aggTable);
-    }
-  }
-
-  /**
-   * Fill dimensions and measures for carbon table
-   *
-   * @param tableSchema
-   */
-  private void fillDimensionsAndMeasuresForTables(TableSchema tableSchema) {
-    List<CarbonDimension> dimensions = new ArrayList<CarbonDimension>();
-    List<CarbonMeasure> measures = new ArrayList<CarbonMeasure>();
-    this.tableDimensionsMap.put(tableSchema.getTableName(), dimensions);
-    this.tableMeasuresMap.put(tableSchema.getTableName(), measures);
-    int dimensionOrdinal = 0;
-    int measureOrdinal = 0;
-    int keyOrdinal = 0;
-    int columnGroupOrdinal = -1;
-    int previousColumnGroupId = -1;
-    List<ColumnSchema> listOfColumns = tableSchema.getListOfColumns();
-    int complexTypeOrdinal = -1;
-    for (int i = 0; i < listOfColumns.size(); i++) {
-      ColumnSchema columnSchema = listOfColumns.get(i);
-      if (columnSchema.isDimensionColumn()) {
-        if (columnSchema.getNumberOfChild() > 0) {
-          CarbonDimension complexDimension =
-              new CarbonDimension(columnSchema, dimensionOrdinal++, -1, -1, ++complexTypeOrdinal);
-          complexDimension.initializeChildDimensionsList(columnSchema.getNumberOfChild());
-          dimensions.add(complexDimension);
-          dimensionOrdinal =
-              readAllComplexTypeChildrens(dimensionOrdinal, columnSchema.getNumberOfChild(),
-                  listOfColumns, complexDimension);
-          i = dimensionOrdinal - 1;
-          complexTypeOrdinal = assignComplexOrdinal(complexDimension, complexTypeOrdinal);
-        } else {
-          if (!columnSchema.getEncodingList().contains(Encoding.DICTIONARY)) {
-            dimensions.add(new CarbonDimension(columnSchema, dimensionOrdinal++, -1, -1, -1));
-          } else if (columnSchema.getEncodingList().contains(Encoding.DICTIONARY)
-              && columnSchema.getColumnGroupId() == -1) {
-            dimensions
-                .add(new CarbonDimension(columnSchema, dimensionOrdinal++, keyOrdinal++, -1, -1));
-          } else {
-            columnGroupOrdinal =
-                previousColumnGroupId == columnSchema.getColumnGroupId() ? ++columnGroupOrdinal : 0;
-            previousColumnGroupId = columnSchema.getColumnGroupId();
-            dimensions.add(new CarbonDimension(columnSchema, dimensionOrdinal++, keyOrdinal++,
-                columnGroupOrdinal, -1));
-
-          }
-        }
-      } else {
-        measures.add(new CarbonMeasure(columnSchema, measureOrdinal++));
-      }
-    }
-  }
-
-  /**
-   * Reads all primitive/complex children and adds them as a list of child
-   * carbon dimensions to the parent dimension
-   *
-   * @param dimensionOrdinal
-   * @param childCount
-   * @param listOfColumns
-   * @param parentDimension
-   * @return the next dimension ordinal
-   */
-  private int readAllComplexTypeChildrens(int dimensionOrdinal, int childCount,
-      List<ColumnSchema> listOfColumns, CarbonDimension parentDimension) {
-    for (int i = 0; i < childCount; i++) {
-      ColumnSchema columnSchema = listOfColumns.get(dimensionOrdinal);
-      if (columnSchema.isDimensionColumn()) {
-        if (columnSchema.getNumberOfChild() > 0) {
-          CarbonDimension complexDimension =
-              new CarbonDimension(columnSchema, dimensionOrdinal++, -1, -1, -1);
-          complexDimension.initializeChildDimensionsList(columnSchema.getNumberOfChild());
-          parentDimension.getListOfChildDimensions().add(complexDimension);
-          dimensionOrdinal =
-              readAllComplexTypeChildrens(dimensionOrdinal, columnSchema.getNumberOfChild(),
-                  listOfColumns, complexDimension);
-        } else {
-          parentDimension.getListOfChildDimensions()
-              .add(new CarbonDimension(columnSchema, dimensionOrdinal++, -1, -1, -1));
-        }
-      }
-    }
-    return dimensionOrdinal;
-  }
-
-  /**
-   * Assigns a complex-type ordinal to every child of the given complex
-   * dimension, recursing into nested complex children
-   *
-   * @param parentDimension
-   * @param complexDimensionOrdianl
-   * @return the last assigned complex-type ordinal
-   */
-  private int assignComplexOrdinal(CarbonDimension parentDimension, int complexDimensionOrdianl) {
-    for (int i = 0; i < parentDimension.getNumberOfChild(); i++) {
-      CarbonDimension dimension = parentDimension.getListOfChildDimensions().get(i);
-      if (dimension.getNumberOfChild() > 0) {
-        dimension.setComplexTypeOridnal(++complexDimensionOrdianl);
-        complexDimensionOrdianl = assignComplexOrdinal(dimension, complexDimensionOrdianl);
-      } else {
-        parentDimension.getListOfChildDimensions().get(i)
-            .setComplexTypeOridnal(++complexDimensionOrdianl);
-      }
-    }
-    return complexDimensionOrdianl;
-  }
-
-  /**
-   * @return the databaseName
-   */
-  public String getDatabaseName() {
-    return absoluteTableIdentifier.getCarbonTableIdentifier().getDatabaseName();
-  }
-
-  /**
-   * @return the fact table name
-   */
-  public String getFactTableName() {
-    return absoluteTableIdentifier.getCarbonTableIdentifier().getTableName();
-  }
-
-  /**
-   * @return the tableUniqueName
-   */
-  public String getTableUniqueName() {
-    return tableUniqueName;
-  }
-
-  /**
-   * @return the metaDataFilepath
-   */
-  public String getMetaDataFilepath() {
-    return metaDataFilepath;
-  }
-
-  /**
-   * @return storepath
-   */
-  public String getStorePath() {
-    return absoluteTableIdentifier.getStorePath();
-  }
-
-  /**
-   * @return list of aggregate TablesName
-   */
-  public List<String> getAggregateTablesName() {
-    return aggregateTablesName;
-  }
-
-  /**
-   * @return the tableLastUpdatedTime
-   */
-  public long getTableLastUpdatedTime() {
-    return tableLastUpdatedTime;
-  }
-
-  /**
-   * to get the number of dimensions present in the table
-   *
-   * @param tableName
-   * @return number of dimensions present in the table
-   */
-  public int getNumberOfDimensions(String tableName) {
-    return tableDimensionsMap.get(tableName).size();
-  }
-
-  /**
-   * to get the number of measures present in the table
-   *
-   * @param tableName
-   * @return number of measures present in the table
-   */
-  public int getNumberOfMeasures(String tableName) {
-    return tableMeasuresMap.get(tableName).size();
-  }
-
-  /**
-   * to get all dimensions of a table
-   *
-   * @param tableName
-   * @return all dimensions of a table
-   */
-  public List<CarbonDimension> getDimensionByTableName(String tableName) {
-    return tableDimensionsMap.get(tableName);
-  }
-
-  /**
-   * to get all measures of a table
-   *
-   * @param tableName
-   * @return all measures of a table
-   */
-  public List<CarbonMeasure> getMeasureByTableName(String tableName) {
-    return tableMeasuresMap.get(tableName);
-  }
-
-  /**
-   * to get a particular measure from a table
-   *
-   * @param tableName
-   * @param columnName
-   * @return the measure if the name matches, else null
-   */
-  public CarbonMeasure getMeasureByName(String tableName, String columnName) {
-    List<CarbonMeasure> measureList = tableMeasuresMap.get(tableName);
-    for (CarbonMeasure measure : measureList) {
-      if (measure.getColName().equalsIgnoreCase(columnName)) {
-        return measure;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * to get a particular dimension from a table
-   *
-   * @param tableName
-   * @param columnName
-   * @return the dimension if the name matches, else null
-   */
-  public CarbonDimension getDimensionByName(String tableName, String columnName) {
-    List<CarbonDimension> dimList = tableDimensionsMap.get(tableName);
-    for (CarbonDimension dim : dimList) {
-      if (dim.getColName().equalsIgnoreCase(columnName)) {
-        return dim;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * gets all child dimensions of a complex-type dimension
-   *
-   * @param dimName
-   * @return list of child dimensions
-   */
-  public List<CarbonDimension> getChildren(String dimName) {
-    for (List<CarbonDimension> list : tableDimensionsMap.values()) {
-      List<CarbonDimension> childDims = getChildren(dimName, list);
-      if (childDims != null) {
-        return childDims;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * returns child dimensions at level 2 or deeper
-   *
-   * @param dimName
-   * @param dimensions
-   * @return list of child dimensions
-   */
-  public List<CarbonDimension> getChildren(String dimName, List<CarbonDimension> dimensions) {
-    for (CarbonDimension carbonDimension : dimensions) {
-      if (carbonDimension.getColName().equals(dimName)) {
-        return carbonDimension.getListOfChildDimensions();
-      } else if (null != carbonDimension.getListOfChildDimensions()
-          && carbonDimension.getListOfChildDimensions().size() > 0) {
-        List<CarbonDimension> childDims =
-            getChildren(dimName, carbonDimension.getListOfChildDimensions());
-        if (childDims != null) {
-          return childDims;
-        }
-      }
-    }
-    return null;
-  }
-
-  /**
-   * @return absolute table identifier
-   */
-  public AbsoluteTableIdentifier getAbsoluteTableIdentifier() {
-    return absoluteTableIdentifier;
-  }
-
-  /**
-   * @return carbon table identifier
-   */
-  public CarbonTableIdentifier getCarbonTableIdentifier() {
-    return absoluteTableIdentifier.getCarbonTableIdentifier();
-  }
-
-  /**
-   * gets partition count for this table
-   * TODO: to be implemented while supporting partitioning
-   */
-  public int getPartitionCount() {
-    return 1;
-  }
-}

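A note on the ordinal bookkeeping in fillDimensionsAndMeasuresForTables above: every dimension consumes a running dimensionOrdinal, only dictionary-encoded dimensions consume a keyOrdinal, and dimensions sharing a columnGroupId get consecutive columnGroupOrdinals. The following minimal, self-contained sketch (hypothetical inputs, plain arrays instead of the real ColumnSchema/CarbonDimension types) traces those counters:

    public class OrdinalDemo {
      public static void main(String[] args) {
        // hypothetical table: one no-dictionary dimension, one plain dictionary
        // dimension, and two dictionary dimensions sharing column group 5
        boolean[] isDictionary = {false, true, true, true};
        int[] columnGroupId = {-1, -1, 5, 5};
        int dimensionOrdinal = 0, keyOrdinal = 0;
        int columnGroupOrdinal = -1, previousColumnGroupId = -1;
        for (int i = 0; i < isDictionary.length; i++) {
          int dimOrd = dimensionOrdinal++;
          int keyOrd = -1, grpOrd = -1;
          if (isDictionary[i]) {
            keyOrd = keyOrdinal++;          // only dictionary columns get key ordinals
            if (columnGroupId[i] != -1) {   // grouped columns count within their group
              columnGroupOrdinal =
                  previousColumnGroupId == columnGroupId[i] ? ++columnGroupOrdinal : 0;
              previousColumnGroupId = columnGroupId[i];
              grpOrd = columnGroupOrdinal;
            }
          }
          System.out.printf("col %d -> dimOrdinal=%d keyOrdinal=%d groupOrdinal=%d%n",
              i, dimOrd, keyOrd, grpOrd);
        }
      }
    }
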
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/TableInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/TableInfo.java b/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/TableInfo.java
deleted file mode 100644
index 0a9cfca..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/TableInfo.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.schema.table;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-/**
- * Stores the information about the table.
- * It stores the fact table as well as the aggregate tables present in the schema
- */
-public class TableInfo implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = -5034287968314105193L;
-
-  /**
-   * name of the database
-   */
-  private String databaseName;
-
-  /**
-   * table name to group fact table and aggregate table
-   */
-  private String tableUniqueName;
-
-  /**
-   * fact table information
-   */
-  private TableSchema factTable;
-
-  /**
-   * list of aggregate table
-   */
-  private List<TableSchema> aggregateTableList;
-
-  /**
-   * last updated time, used to refresh the table if anything changes
-   */
-  private long lastUpdatedTime;
-
-  /**
-   * metadata file path (check if it is really required)
-   */
-  private String metaDataFilepath;
-
-  /**
-   * store location
-   */
-  private String storePath;
-
-  public TableInfo() {
-    aggregateTableList = new ArrayList<TableSchema>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-  }
-
-  /**
-   * @return the factTable
-   */
-  public TableSchema getFactTable() {
-    return factTable;
-  }
-
-  /**
-   * @param factTable the factTable to set
-   */
-  public void setFactTable(TableSchema factTable) {
-    this.factTable = factTable;
-  }
-
-  /**
-   * @return the aggregateTableList
-   */
-  public List<TableSchema> getAggregateTableList() {
-    return aggregateTableList;
-  }
-
-  /**
-   * @param aggregateTableList the aggregateTableList to set
-   */
-  public void setAggregateTableList(List<TableSchema> aggregateTableList) {
-    this.aggregateTableList = aggregateTableList;
-  }
-
-  /**
-   * @return the databaseName
-   */
-  public String getDatabaseName() {
-    return databaseName;
-  }
-
-  /**
-   * @param databaseName the databaseName to set
-   */
-  public void setDatabaseName(String databaseName) {
-    this.databaseName = databaseName;
-  }
-
-  public TableSchema getTableSchemaByName(String tableName) {
-    if (factTable.getTableName().equalsIgnoreCase(tableName)) {
-      return factTable;
-    }
-    for (TableSchema aggregatTableSchema : aggregateTableList) {
-      if (aggregatTableSchema.getTableName().equals(tableName)) {
-        return aggregatTableSchema;
-      }
-    }
-    return null;
-  }
-
-  public TableSchema getTableSchemaByTableId(String tableId) {
-    if (factTable.getTableId().equals(tableId)) {
-      return factTable;
-    }
-    for (TableSchema aggregatTableSchema : aggregateTableList) {
-      if (aggregatTableSchema.getTableId().equals(tableId)) {
-        return aggregatTableSchema;
-      }
-    }
-    return null;
-  }
-
-  public int getNumberOfAggregateTables() {
-    return aggregateTableList.size();
-  }
-
-  /**
-   * @return the tableUniqueName
-   */
-  public String getTableUniqueName() {
-    return tableUniqueName;
-  }
-
-  /**
-   * @param tableUniqueName the tableUniqueName to set
-   */
-  public void setTableUniqueName(String tableUniqueName) {
-    this.tableUniqueName = tableUniqueName;
-  }
-
-  /**
-   * @return the lastUpdatedTime
-   */
-  public long getLastUpdatedTime() {
-    return lastUpdatedTime;
-  }
-
-  /**
-   * @param lastUpdatedTime the lastUpdatedTime to set
-   */
-  public void setLastUpdatedTime(long lastUpdatedTime) {
-    this.lastUpdatedTime = lastUpdatedTime;
-  }
-
-  /**
-   * @return the metaDataFilepath
-   */
-  public String getMetaDataFilepath() {
-    return metaDataFilepath;
-  }
-
-  /**
-   * @param metaDataFilepath the metaDataFilepath to set
-   */
-  public void setMetaDataFilepath(String metaDataFilepath) {
-    this.metaDataFilepath = metaDataFilepath;
-  }
-
-  public String getStorePath() {
-    return storePath;
-  }
-
-  public void setStorePath(String storePath) {
-    this.storePath = storePath;
-  }
-
-  /**
-   * to generate the hash code
-   */
-  @Override public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + ((databaseName == null) ? 0 : databaseName.hashCode());
-    result = prime * result + ((tableUniqueName == null) ? 0 : tableUniqueName.hashCode());
-    return result;
-  }
-
-  /**
-   * Overridden equals method
-   */
-  @Override public boolean equals(Object obj) {
-    if (this == obj) {
-      return true;
-    }
-    if (obj == null) {
-      return false;
-    }
-    if (!(obj instanceof TableInfo)) {
-      return false;
-    }
-    TableInfo other = (TableInfo) obj;
-    if (databaseName == null) {
-      if (other.databaseName != null) {
-        return false;
-      }
-    } else if (!databaseName.equals(other.databaseName)) {
-      return false;
-    }
-
-    if (tableUniqueName == null) {
-      if (other.tableUniqueName != null) {
-        return false;
-      }
-    } else if (!tableUniqueName.equals(other.tableUniqueName)) {
-      return false;
-    }
-    return true;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/TableSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/TableSchema.java b/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/TableSchema.java
deleted file mode 100644
index 06e9cf5..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/TableSchema.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.schema.table;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.carbon.metadata.schema.SchemaEvolution;
-import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-/**
- * Persisting the table information
- */
-public class TableSchema implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = -1928614587722507026L;
-
-  /**
-   * table id
-   */
-  private String tableId;
-
-  /**
-   * table Name
-   */
-  private String tableName;
-
-  /**
-   * Columns in the table
-   */
-  private List<ColumnSchema> listOfColumns;
-
-  /**
-   * History of schema evolution of this table
-   */
-  private SchemaEvolution schemaEvalution;
-
-  public TableSchema() {
-    this.listOfColumns = new ArrayList<ColumnSchema>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-  }
-
-  /**
-   * @return the tableId
-   */
-  public String getTableId() {
-    return tableId;
-  }
-
-  /**
-   * @param tableId the tableId to set
-   */
-  public void setTableId(String tableId) {
-    this.tableId = tableId;
-  }
-
-  /**
-   * @return the listOfColumns
-   */
-  public List<ColumnSchema> getListOfColumns() {
-    return listOfColumns;
-  }
-
-  /**
-   * @param listOfColumns the listOfColumns to set
-   */
-  public void setListOfColumns(List<ColumnSchema> listOfColumns) {
-    this.listOfColumns = listOfColumns;
-  }
-
-  /**
-   * @return the schemaEvalution
-   */
-  public SchemaEvolution getSchemaEvalution() {
-    return schemaEvalution;
-  }
-
-  /**
-   * @param schemaEvalution the schemaEvalution to set
-   */
-  public void setSchemaEvalution(SchemaEvolution schemaEvalution) {
-    this.schemaEvalution = schemaEvalution;
-  }
-
-  /**
-   * @return the tableName
-   */
-  public String getTableName() {
-    return tableName;
-  }
-
-  /**
-   * @param tableName the tableName to set
-   */
-  public void setTableName(String tableName) {
-    this.tableName = tableName;
-  }
-
-  /**
-   * to get the column schema present in the table by name
-   *
-   * @param columnName
-   * @return column schema if matches the name
-   */
-  public ColumnSchema getColumnSchemaByName(String columnName) {
-    for (ColumnSchema tableColumn : listOfColumns) {
-      if (tableColumn.getColumnName().equals(columnName)) {
-        return tableColumn;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * to get the column schema present in the table by unique id
-   *
-   * @param columnUniqueId
-   * @return column schema if matches the id
-   */
-  public ColumnSchema getColumnSchemaById(String columnUniqueId) {
-    for (ColumnSchema tableColumn : listOfColumns) {
-      if (tableColumn.getColumnUniqueId().equalsIgnoreCase(columnUniqueId)) {
-        return tableColumn;
-      }
-    }
-    return null;
-  }
-
-  @Override public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + ((tableId == null) ? 0 : tableId.hashCode());
-    result = prime * result + ((tableName == null) ? 0 : tableName.hashCode());
-    return result;
-  }
-
-  @Override public boolean equals(Object obj) {
-    if (this == obj) {
-      return true;
-    }
-    if (obj == null) {
-      return false;
-    }
-    if (getClass() != obj.getClass()) {
-      return false;
-    }
-    TableSchema other = (TableSchema) obj;
-    if (tableId == null) {
-      if (other.tableId != null) {
-        return false;
-      }
-    } else if (!tableId.equals(other.tableId)) {
-      return false;
-    }
-    if (tableName == null) {
-      if (other.tableName != null) {
-        return false;
-      }
-    } else if (!tableName.equals(other.tableName)) {
-      return false;
-    }
-    return true;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/CarbonColumn.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/CarbonColumn.java b/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/CarbonColumn.java
deleted file mode 100644
index 90d1869..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/schema/table/column/CarbonColumn.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.carbon.metadata.schema.table.column;
-
-import java.io.Serializable;
-import java.util.List;
-import java.util.Map;
-
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-
-public class CarbonColumn implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = 3648269871256322681L;
-
-  /**
-   * column schema
-   */
-  protected ColumnSchema columnSchema;
-
-  /**
-   * table ordinal
-   */
-  protected int ordinal;
-
-  /**
-   * default value used in case of restructuring, when an older
-   * segment does not have this particular column
-   */
-  protected byte[] defaultValue;
-
-  /**
-   * Column identifier
-   */
-  protected ColumnIdentifier columnIdentifier;
-
-  public CarbonColumn(ColumnSchema columnSchema, int ordinal) {
-    this.columnSchema = columnSchema;
-    this.ordinal = ordinal;
-    this.columnIdentifier =
-        new ColumnIdentifier(getColumnId(), getColumnProperties(), getDataType());
-  }
-
-  /**
-   * @return true if columnar, false if row based
-   */
-  public boolean isColumnar() {
-    return columnSchema.isColumnar();
-  }
-
-  /**
-   * @return column unique id
-   */
-  public String getColumnId() {
-    return columnSchema.getColumnUniqueId();
-  }
-
-  /**
-   * @return the dataType
-   */
-  public DataType getDataType() {
-    return columnSchema.getDataType();
-  }
-
-  /**
-   * @return the colName
-   */
-  public String getColName() {
-    return columnSchema.getColumnName();
-  }
-
-  /**
-   * @return the ordinal
-   */
-  public int getOrdinal() {
-    return ordinal;
-  }
-
-  /**
-   * @return the list of encodings used in the column
-   */
-  public List<Encoding> getEncoder() {
-    return columnSchema.getEncodingList();
-  }
-
-  /**
-   * @return column group id if it is row based
-   */
-  public int columnGroupId() {
-    return columnSchema.getColumnGroupId();
-  }
-
-  /**
-   * @return the defaultValue
-   */
-  public byte[] getDefaultValue() {
-    return defaultValue;
-  }
-
-  /**
-   * @param defaultValue the defaultValue to set
-   */
-  public void setDefaultValue(byte[] defaultValue) {
-    this.defaultValue = defaultValue;
-  }
-
-  /**
-   * @param encoding
-   * @return true if the column contains the passed encoding
-   */
-  public boolean hasEncoding(Encoding encoding) {
-    return columnSchema.hasEncoding(encoding);
-  }
-
-  /**
-   * @return true if DataType is ARRAY or STRUCT, else false
-   */
-  public Boolean isComplex() {
-    return columnSchema.isComplex();
-  }
-
-  /**
-   * @return true if the column is a dimension, else false
-   */
-  public Boolean isDimesion() {
-    return columnSchema.isDimensionColumn();
-  }
-
-  /**
-   * @return true if the column uses an inverted index, else false
-   */
-  public Boolean isUseInvertedIndnex() {
-    return columnSchema.isUseInvertedIndex();
-  }
-
-  public ColumnSchema getColumnSchema() {
-    return this.columnSchema;
-  }
-
-  /**
-   * @return the column properties
-   */
-  public Map<String, String> getColumnProperties() {
-    return this.columnSchema.getColumnProperties();
-  }
-
-  /**
-   * @return columnIdentifier
-   */
-  public ColumnIdentifier getColumnIdentifier() {
-    return this.columnIdentifier;
-  }
-}



[33/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortInfo.java b/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortInfo.java
new file mode 100644
index 0000000..77bcef5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortInfo.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.writer.sortindex;
+
+import java.util.List;
+
+/**
+ * Model to hold the sortIndex and sortIndexInverted data
+ */
+public class CarbonDictionarySortInfo {
+  /**
+   * Sort index after members are sorted
+   */
+  private List<Integer> sortIndex;
+  /**
+   * inverted sort index to get the member
+   */
+  private List<Integer> sortIndexInverted;
+
+  /**
+   * The constructor to instantiate the CarbonDictionarySortInfo object
+   * with sortIndex and sortIndexInverted data
+   *
+   * @param sortIndex
+   * @param sortIndexInverted
+   */
+  public CarbonDictionarySortInfo(List<Integer> sortIndex, List<Integer> sortIndexInverted) {
+    this.sortIndex = sortIndex;
+    this.sortIndexInverted = sortIndexInverted;
+  }
+
+  /**
+   * returns the sort index list
+   *
+   * @return the sortIndex list
+   */
+  public List<Integer> getSortIndex() {
+    return sortIndex;
+  }
+
+  /**
+   * returns the inverted sort index list
+   *
+   * @return the sortIndexInverted list
+   */
+  public List<Integer> getSortIndexInverted() {
+    return sortIndexInverted;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortInfoPreparator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortInfoPreparator.java b/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortInfoPreparator.java
new file mode 100644
index 0000000..3a7f0f1
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortInfoPreparator.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.writer.sortindex;
+
+import java.nio.charset.Charset;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.cache.dictionary.DictionaryChunksWrapper;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.util.CarbonUtilException;
+
+import org.apache.commons.lang.ArrayUtils;
+
+/**
+ * The class prepares the column sort info, i.e. the sortIndex
+ * and inverted sort index info
+ */
+public class CarbonDictionarySortInfoPreparator {
+
+  /**
+   * The method returns the column Sort Info
+   *
+   * @param newDistinctValues new distinct value to be added
+   * @param dictionary        old distinct values
+   * @param dataType          DataType of columns
+   * @return CarbonDictionarySortInfo returns the column Sort Info
+   * @throws CarbonUtilException
+   */
+  public CarbonDictionarySortInfo getDictionarySortInfo(List<String> newDistinctValues,
+      Dictionary dictionary, DataType dataType) throws CarbonUtilException {
+    CarbonDictionarySortModel[] dictionarySortModels =
+        prepareDictionarySortModels(newDistinctValues, dictionary, dataType);
+    return createColumnSortInfo(dictionarySortModels);
+  }
+
+  /**
+   * The method prepares the sort_index and sort_index_inverted data
+   *
+   * @param dictionarySortModels
+   */
+  private CarbonDictionarySortInfo createColumnSortInfo(
+      CarbonDictionarySortModel[] dictionarySortModels) {
+
+    //Sort index after members are sorted
+    int[] sortIndex;
+    //inverted sort index to get the member
+    int[] sortIndexInverted;
+
+    Arrays.sort(dictionarySortModels);
+    sortIndex = new int[dictionarySortModels.length];
+    sortIndexInverted = new int[dictionarySortModels.length];
+
+    for (int i = 0; i < dictionarySortModels.length; i++) {
+      CarbonDictionarySortModel dictionarySortModel = dictionarySortModels[i];
+      sortIndex[i] = dictionarySortModel.getKey();
+      // the array index starts from 0, so -1 avoids wasting the 0th slot of
+      // the array; surrogate keys start from 1, so 1 is added to i,
+      // which is a counter starting from 0
+      sortIndexInverted[dictionarySortModel.getKey() - 1] = i + 1;
+    }
+    dictionarySortModels = null;
+    List<Integer> sortIndexList = convertToList(sortIndex);
+    List<Integer> sortIndexInvertedList = convertToList(sortIndexInverted);
+    return new CarbonDictionarySortInfo(sortIndexList, sortIndexInvertedList);
+  }
+
+  /**
+   * The method converts the int[] to List<Integer>
+   *
+   * @param data
+   * @return
+   */
+  private List<Integer> convertToList(int[] data) {
+    Integer[] wrapperType = ArrayUtils.toObject(data);
+    return Arrays.asList(wrapperType);
+  }
+
+  /**
+   * The method returns the array of CarbonDictionarySortModel
+   *
+   * @param distinctValues new distinct values
+   * @param dictionary The wrapper wraps the list<list<byte[]>> and provides the
+   *                   iterator to retrieve the chunk members.
+   * @param dataType   DataType of columns
+   * @return CarbonDictionarySortModel[]; each model holds a member's
+   * surrogate key and its value
+   */
+  private CarbonDictionarySortModel[] prepareDictionarySortModels(List<String> distinctValues,
+      Dictionary dictionary, DataType dataType) {
+    CarbonDictionarySortModel[] dictionarySortModels = null;
+    //The wrapper wraps the list<list<byte[]>> and provides the iterator to
+    // retrieve the chunk members.
+    int surrogate = 1;
+    if (null != dictionary) {
+      DictionaryChunksWrapper dictionaryChunksWrapper = dictionary.getDictionaryChunks();
+      dictionarySortModels =
+          new CarbonDictionarySortModel[dictionaryChunksWrapper.getSize() + distinctValues.size()];
+      while (dictionaryChunksWrapper.hasNext()) {
+        dictionarySortModels[surrogate - 1] =
+            createDictionarySortModel(surrogate, dataType, dictionaryChunksWrapper.next());
+        surrogate++;
+      }
+    } else {
+      dictionarySortModels = new CarbonDictionarySortModel[distinctValues.size()];
+    }
+    // for new distinct values
+    Iterator<String> distinctValue = distinctValues.iterator();
+    while (distinctValue.hasNext()) {
+      dictionarySortModels[surrogate - 1] =
+          createDictionarySortModel(surrogate, dataType, distinctValue.next().getBytes());
+      surrogate++;
+    }
+    return dictionarySortModels;
+  }
+
+  /**
+   *
+   * @param surrogate
+   * @param dataType
+   * @param value member value
+   * @return CarbonDictionarySortModel
+   */
+  private CarbonDictionarySortModel createDictionarySortModel(int surrogate, DataType dataType,
+      byte[] value) {
+    String memberValue = new String(value, Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
+    return new CarbonDictionarySortModel(surrogate, dataType, memberValue);
+  }
+}
+

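To make the sort_index / sort_index_inverted arithmetic in createColumnSortInfo above concrete, here is a small self-contained sketch (hypothetical member values, plain arrays instead of the dictionary chunk wrapper). sortIndex lists the surrogate keys in sorted-member order; sortIndexInverted maps each surrogate key back to its 1-based position in that order:

    import java.util.Arrays;
    import java.util.Comparator;

    public class SortIndexDemo {
      public static void main(String[] args) {
        // members indexed by surrogate-1; surrogate keys start at 1
        String[] members = {"delhi", "agra", "chennai"};
        Integer[] surrogates = {1, 2, 3};
        // sort the surrogates by the member value they point to
        Arrays.sort(surrogates, Comparator.comparing(s -> members[s - 1]));
        int[] sortIndex = new int[members.length];
        int[] sortIndexInverted = new int[members.length];
        for (int i = 0; i < surrogates.length; i++) {
          sortIndex[i] = surrogates[i];                 // i-th smallest member's surrogate
          sortIndexInverted[surrogates[i] - 1] = i + 1; // 1-based sorted position per surrogate
        }
        System.out.println(Arrays.toString(sortIndex));         // [2, 3, 1]
        System.out.println(Arrays.toString(sortIndexInverted)); // [3, 1, 2]
      }
    }
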
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortModel.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortModel.java b/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortModel.java
new file mode 100644
index 0000000..0d3040a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortModel.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.writer.sortindex;
+
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.util.CarbonProperties;
+
+/**
+ * Dictionary sort model class holds the member value and its corresponding surrogate key.
+ */
+public class CarbonDictionarySortModel implements Comparable<CarbonDictionarySortModel> {
+
+  /**
+   * Surrogate key
+   */
+  private int key;
+
+  /**
+   * member value
+   */
+  private String memberValue;
+
+  /**
+   * member dataType
+   */
+  private DataType dataType;
+
+  /**
+   * Constructor to init the dictionary sort model
+   *
+   * @param key
+   * @param dataType
+   * @param memberValue
+   */
+  public CarbonDictionarySortModel(int key, DataType dataType, String memberValue) {
+    this.key = key;
+    this.dataType = dataType;
+    this.memberValue = memberValue;
+  }
+
+  /**
+   * Compare
+   */
+  @Override public int compareTo(CarbonDictionarySortModel o) {
+    switch (dataType) {
+      case SHORT:
+      case INT:
+      case LONG:
+      case DOUBLE:
+
+        Double d1 = null;
+        Double d2 = null;
+        try {
+          d1 = new Double(memberValue);
+        } catch (NumberFormatException e) {
+          if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(o.memberValue)) {
+            return -1;
+          }
+          return 1;
+        }
+        try {
+          d2 = new Double(o.memberValue);
+        } catch (NumberFormatException e) {
+          return -1;
+        }
+        return d1.compareTo(d2);
+      case DECIMAL:
+        java.math.BigDecimal val1 = null;
+        java.math.BigDecimal val2 = null;
+        try {
+          val1 = new java.math.BigDecimal(memberValue);
+        } catch (NumberFormatException e) {
+          if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(o.memberValue)) {
+            return -1;
+          }
+          return 1;
+        }
+        try {
+          val2 = new java.math.BigDecimal(o.memberValue);
+        } catch (NumberFormatException e) {
+          return -1;
+        }
+        return val1.compareTo(val2);
+      case TIMESTAMP:
+        SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
+            .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+                CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
+        Date date1 = null;
+        Date date2 = null;
+        try {
+          date1 = parser.parse(memberValue);
+        } catch (ParseException e) {
+          if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(o.memberValue)) {
+            return -1;
+          }
+          return 1;
+        }
+        try {
+          date2 = parser.parse(o.memberValue);
+        } catch (ParseException e) {
+          return -1;
+        }
+        return date1.compareTo(date2);
+      case STRING:
+      default:
+        return this.memberValue.compareTo(o.memberValue);
+    }
+  }
+
+  /**
+   * @see Object#hashCode()
+   */
+  @Override public int hashCode() {
+    int result = ((memberValue == null) ? 0 : memberValue.hashCode());
+    return result;
+  }
+
+  /**
+   * @see Object#equals(Object)
+   */
+  @Override public boolean equals(Object obj) {
+    if (obj instanceof CarbonDictionarySortModel) {
+      if (this == obj) {
+        return true;
+      }
+      CarbonDictionarySortModel other = (CarbonDictionarySortModel) obj;
+      if (memberValue == null) {
+        if (other.memberValue != null) {
+          return false;
+        }
+      } else if (!this.memberValue.equals(other.memberValue)) {
+        return false;
+      }
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  /**
+   * returns the surrogate key of the member
+   *
+   * @return
+   */
+  public int getKey() {
+    return key;
+  }
+
+  /**
+   * Returns the member value
+   *
+   * @return
+   */
+  public String getMemberValue() {
+    return memberValue;
+  }
+
+}

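Because CarbonDictionarySortModel implements Comparable, preparing the sorted order is just an Arrays.sort over the models. A minimal usage sketch with hypothetical member values (the constructor and getters are the ones shown in the diff above):

    import java.util.Arrays;

    import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
    import org.apache.carbondata.core.writer.sortindex.CarbonDictionarySortModel;

    public class SortModelDemo {
      public static void main(String[] args) {
        // surrogate keys are assigned in insertion order, starting at 1
        CarbonDictionarySortModel[] models = {
            new CarbonDictionarySortModel(1, DataType.STRING, "delhi"),
            new CarbonDictionarySortModel(2, DataType.STRING, "agra"),
            new CarbonDictionarySortModel(3, DataType.STRING, "chennai")
        };
        Arrays.sort(models);  // STRING comparison falls through to memberValue.compareTo
        for (CarbonDictionarySortModel m : models) {
          System.out.println(m.getKey() + " -> " + m.getMemberValue());
        }
        // prints 2 -> agra, 3 -> chennai, 1 -> delhi
      }
    }
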
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/collector/ScannedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/collector/ScannedResultCollector.java b/core/src/main/java/org/apache/carbondata/scan/collector/ScannedResultCollector.java
new file mode 100644
index 0000000..dce6ae5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/collector/ScannedResultCollector.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.collector;
+
+import java.util.List;
+
+import org.apache.carbondata.scan.result.AbstractScannedResult;
+
+/**
+ * Interface which will be used to aggregate the scan result
+ */
+public interface ScannedResultCollector {
+
+  /**
+   * Below method will be used to aggregate the scanned result
+   *
+   * @param scannedResult scanned result
+   * @return the aggregated records, at most batchSize rows
+   */
+  List<Object[]> collectData(AbstractScannedResult scannedResult, int batchSize);
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/collector/impl/AbstractScannedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/collector/impl/AbstractScannedResultCollector.java b/core/src/main/java/org/apache/carbondata/scan/collector/impl/AbstractScannedResultCollector.java
new file mode 100644
index 0000000..387b54f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/collector/impl/AbstractScannedResultCollector.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.collector.impl;
+
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+import org.apache.carbondata.core.util.DataTypeUtil;
+import org.apache.carbondata.scan.collector.ScannedResultCollector;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.executor.infos.KeyStructureInfo;
+import org.apache.carbondata.scan.executor.util.QueryUtil;
+import org.apache.carbondata.scan.result.AbstractScannedResult;
+import org.apache.carbondata.scan.wrappers.ByteArrayWrapper;
+
+/**
+ * It is not a collector; it is just a scanned result holder.
+ */
+public abstract class AbstractScannedResultCollector implements ScannedResultCollector {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(AbstractScannedResultCollector.class.getName());
+
+  /**
+   * restructuring info
+   */
+  private KeyStructureInfo restructureInfos;
+
+  /**
+   * table block execution infos
+   */
+  protected BlockExecutionInfo tableBlockExecutionInfos;
+
+  /**
+   * Measure ordinals
+   */
+  protected int[] measuresOrdinal;
+
+  /**
+   * to check whether a measure exists in the current table block or not;
+   * this is to handle the restructuring scenario
+   */
+  protected boolean[] isMeasureExistsInCurrentBlock;
+
+  /**
+   * default values of the measures; in case of restructuring some measures
+   * won't be present in the table, so the default value will be used to
+   * aggregate the data for those measure columns
+   */
+  private Object[] measureDefaultValue;
+
+  /**
+   * measure datatypes.
+   */
+  protected DataType[] measureDatatypes;
+
+  public AbstractScannedResultCollector(BlockExecutionInfo blockExecutionInfos) {
+    this.tableBlockExecutionInfos = blockExecutionInfos;
+    restructureInfos = blockExecutionInfos.getKeyStructureInfo();
+    measuresOrdinal = tableBlockExecutionInfos.getAggregatorInfo().getMeasureOrdinals();
+    isMeasureExistsInCurrentBlock = tableBlockExecutionInfos.getAggregatorInfo().getMeasureExists();
+    measureDefaultValue = tableBlockExecutionInfos.getAggregatorInfo().getDefaultValues();
+    this.measureDatatypes = tableBlockExecutionInfos.getAggregatorInfo().getMeasureDataTypes();
+  }
+
+  protected void fillMeasureData(Object[] msrValues, int offset,
+      AbstractScannedResult scannedResult) {
+    for (short i = 0; i < measuresOrdinal.length; i++) {
+      // if measure exists in block then pass measure column
+      // data chunk to the collector
+      if (isMeasureExistsInCurrentBlock[i]) {
+        msrValues[i + offset] = getMeasureData(scannedResult.getMeasureChunk(measuresOrdinal[i]),
+            scannedResult.getCurrenrRowId(), measureDatatypes[i]);
+      } else {
+        // if not then get the default value and use that value in aggregation
+        msrValues[i + offset] = measureDefaultValue[i];
+      }
+    }
+  }
+
+  private Object getMeasureData(MeasureColumnDataChunk dataChunk, int index, DataType dataType) {
+    if (!dataChunk.getNullValueIndexHolder().getBitSet().get(index)) {
+      Object msrVal;
+      switch (dataType) {
+        case INT:
+        case LONG:
+          msrVal = dataChunk.getMeasureDataHolder().getReadableLongValueByIndex(index);
+          break;
+        case DECIMAL:
+          msrVal = dataChunk.getMeasureDataHolder().getReadableBigDecimalValueByIndex(index);
+          break;
+        default:
+          msrVal = dataChunk.getMeasureDataHolder().getReadableDoubleValueByIndex(index);
+      }
+      return DataTypeUtil.getMeasureDataBasedOnDataType(msrVal, dataType);
+    }
+    return null;
+  }
+
+  /**
+   * Below method will be used to get the result
+   */
+  protected void updateData(List<Object[]> listBasedResult) {
+    if (tableBlockExecutionInfos.isFixedKeyUpdateRequired()) {
+      updateKeyWithLatestBlockKeygenerator(listBasedResult);
+    }
+  }
+
+  /**
+   * Below method will be used to update the fixed length key with the
+   * latest block key generator
+   *
+   * @param listBasedResult rows whose dictionary keys are updated in place
+   */
+  private void updateKeyWithLatestBlockKeygenerator(List<Object[]> listBasedResult) {
+    try {
+      long[] data = null;
+      ByteArrayWrapper key = null;
+      for (int i = 0; i < listBasedResult.size(); i++) {
+        // get the key
+        key = (ByteArrayWrapper)listBasedResult.get(i)[0];
+        // unpack the key with table block key generator
+        data = tableBlockExecutionInfos.getBlockKeyGenerator()
+            .getKeyArray(key.getDictionaryKey(), tableBlockExecutionInfos.getMaskedByteForBlock());
+        // packed the key with latest block key generator
+        // and generate the masked key for that key
+        key.setDictionaryKey(QueryUtil
+            .getMaskedKey(restructureInfos.getKeyGenerator().generateKey(data),
+                restructureInfos.getMaxKey(), restructureInfos.getMaskByteRanges(),
+                restructureInfos.getMaskByteRanges().length));
+      }
+    } catch (KeyGenException e) {
+      LOGGER.error(e);
+    }
+  }
+
+}

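The restructure handling in fillMeasureData above boils down to a per-measure fallback: if the measure was added after this block was written, its schema default is used instead of reading a chunk. A tiny stand-alone sketch of that idea (plain arrays standing in for measure chunks, hypothetical values):

    import java.util.Arrays;

    public class MeasureFillDemo {
      public static void main(String[] args) {
        // whether each projected measure exists in the (older) block being scanned
        boolean[] existsInBlock = {true, false, true};
        // schema default values, used when a measure is missing after restructure
        Object[] defaults = {null, 0.0d, null};
        // values that the scan would read for measures that do exist
        Object[] scanned = {42L, null, 3.14d};
        Object[] row = new Object[existsInBlock.length];
        for (int i = 0; i < existsInBlock.length; i++) {
          row[i] = existsInBlock[i] ? scanned[i] : defaults[i];
        }
        System.out.println(Arrays.toString(row)); // [42, 0.0, 3.14]
      }
    }
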
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/collector/impl/DictionaryBasedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/collector/impl/DictionaryBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/scan/collector/impl/DictionaryBasedResultCollector.java
new file mode 100644
index 0000000..108677f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/collector/impl/DictionaryBasedResultCollector.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.collector.impl;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.DataTypeUtil;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.filter.GenericQueryType;
+import org.apache.carbondata.scan.model.QueryDimension;
+import org.apache.carbondata.scan.model.QueryMeasure;
+import org.apache.carbondata.scan.result.AbstractScannedResult;
+
+/**
+ * It is not a collector; it is just a scanned result holder.
+ */
+public class DictionaryBasedResultCollector extends AbstractScannedResultCollector {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(DictionaryBasedResultCollector.class.getName());
+
+  public DictionaryBasedResultCollector(BlockExecutionInfo blockExecutionInfos) {
+    super(blockExecutionInfos);
+  }
+
+  /**
+   * This method will add a record, both key and value, to the list object;
+   * it will keep track of how many records are processed, to handle the limit scenario
+   */
+  @Override public List<Object[]> collectData(AbstractScannedResult scannedResult, int batchSize) {
+    List<Object[]> listBasedResult = new ArrayList<>(batchSize);
+    boolean isMsrsPresent = measureDatatypes.length > 0;
+    QueryDimension[] queryDimensions = tableBlockExecutionInfos.getQueryDimensions();
+    QueryMeasure[] queryMeasures = tableBlockExecutionInfos.getQueryMeasures();
+    Map<Integer, GenericQueryType> comlexDimensionInfoMap =
+        tableBlockExecutionInfos.getComlexDimensionInfoMap();
+    boolean[] dictionaryEncodingArray = CarbonUtil.getDictionaryEncodingArray(queryDimensions);
+    boolean[] directDictionaryEncodingArray =
+        CarbonUtil.getDirectDictionaryEncodingArray(queryDimensions);
+    boolean[] complexDataTypeArray = CarbonUtil.getComplexDataTypeArray(queryDimensions);
+    int dimSize = queryDimensions.length;
+    boolean isDimensionsExist = dimSize > 0;
+    int[] order = new int[dimSize + queryMeasures.length];
+    for (int i = 0; i < dimSize; i++) {
+      order[i] = queryDimensions[i].getQueryOrder();
+    }
+    for (int i = 0; i < queryMeasures.length; i++) {
+      order[i + dimSize] = queryMeasures[i].getQueryOrder();
+    }
+    // scan the record and add to list
+    int rowCounter = 0;
+    int dictionaryColumnIndex = 0;
+    int noDictionaryColumnIndex = 0;
+    int complexTypeColumnIndex = 0;
+    int[] surrogateResult;
+    String[] noDictionaryKeys;
+    byte[][] complexTypeKeyArray;
+    while (scannedResult.hasNext() && rowCounter < batchSize) {
+      Object[] row = new Object[dimSize + queryMeasures.length];
+      if (isDimensionsExist) {
+        surrogateResult = scannedResult.getDictionaryKeyIntegerArray();
+        noDictionaryKeys = scannedResult.getNoDictionaryKeyStringArray();
+        complexTypeKeyArray = scannedResult.getComplexTypeKeyArray();
+        dictionaryColumnIndex = 0;
+        noDictionaryColumnIndex = 0;
+        complexTypeColumnIndex = 0;
+        for (int i = 0; i < dimSize; i++) {
+          if (!dictionaryEncodingArray[i]) {
+            row[order[i]] = DataTypeUtil
+                .getDataBasedOnDataType(noDictionaryKeys[noDictionaryColumnIndex++],
+                    queryDimensions[i].getDimension().getDataType());
+          } else if (directDictionaryEncodingArray[i]) {
+            DirectDictionaryGenerator directDictionaryGenerator =
+                DirectDictionaryKeyGeneratorFactory
+                    .getDirectDictionaryGenerator(queryDimensions[i].getDimension().getDataType());
+            if (directDictionaryGenerator != null) {
+              row[order[i]] = directDictionaryGenerator.getValueFromSurrogate(
+                  surrogateResult[dictionaryColumnIndex++]);
+            }
+          } else if (complexDataTypeArray[i]) {
+            row[order[i]] = complexDimensionInfoMap
+                .get(queryDimensions[i].getDimension().getOrdinal())
+                .getDataBasedOnDataTypeFromSurrogates(
+                    ByteBuffer.wrap(complexTypeKeyArray[complexTypeColumnIndex++]));
+          } else {
+            row[order[i]] = surrogateResult[dictionaryColumnIndex++];
+          }
+        }
+
+      } else {
+        scannedResult.incrementCounter();
+      }
+      if (isMsrsPresent) {
+        Object[] msrValues = new Object[measureDatatypes.length];
+        fillMeasureData(msrValues, 0, scannedResult);
+        for (int i = 0; i < msrValues.length; i++) {
+          row[order[i + dimSize]] = msrValues[i];
+        }
+      }
+      listBasedResult.add(row);
+      rowCounter++;
+    }
+    return listBasedResult;
+  }
+
+}
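
The decode loop above writes each value into the output row at its original SELECT
position via the order[] mapping. A minimal, self-contained sketch of that
reorder-by-query-order pattern (the column values and ordering below are
hypothetical, for illustration only):

    import java.util.Arrays;

    public class QueryOrderSketch {
      public static void main(String[] args) {
        // three projected columns, decoded in storage order d0, d1, d2
        Object[] decodedInStorageOrder = { "CN", 2016, "phone" };
        // the user projected them as (d2, d0, d1), so query order differs;
        // order[i] = position of dimension i in the SELECT list
        int[] order = { 1, 2, 0 };
        Object[] row = new Object[order.length];
        for (int i = 0; i < order.length; i++) {
          row[order[i]] = decodedInStorageOrder[i];
        }
        System.out.println(Arrays.toString(row)); // [phone, CN, 2016]
      }
    }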

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/collector/impl/RawBasedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/collector/impl/RawBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/scan/collector/impl/RawBasedResultCollector.java
new file mode 100644
index 0000000..74d4170
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/collector/impl/RawBasedResultCollector.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.collector.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.model.QueryMeasure;
+import org.apache.carbondata.scan.result.AbstractScannedResult;
+import org.apache.carbondata.scan.wrappers.ByteArrayWrapper;
+
+/**
+ * It is not a collector as such; it just holds the scanned result, keeping the
+ * keys as raw bytes in a ByteArrayWrapper and filling only the measure values.
+ */
+public class RawBasedResultCollector extends AbstractScannedResultCollector {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(RawBasedResultCollector.class.getName());
+
+  public RawBasedResultCollector(BlockExecutionInfo blockExecutionInfos) {
+    super(blockExecutionInfos);
+  }
+
+  /**
+   * This method will add a record, both key and value, to the list object.
+   * It will keep track of how many records are processed, to handle the limit scenario.
+   */
+  @Override public List<Object[]> collectData(AbstractScannedResult scannedResult, int batchSize) {
+    List<Object[]> listBasedResult = new ArrayList<>(batchSize);
+    QueryMeasure[] queryMeasures = tableBlockExecutionInfos.getQueryMeasures();
+    ByteArrayWrapper wrapper = null;
+    // scan the record and add to list
+    int rowCounter = 0;
+    while (scannedResult.hasNext() && rowCounter < batchSize) {
+      Object[] row = new Object[1 + queryMeasures.length];
+      wrapper = new ByteArrayWrapper();
+      wrapper.setDictionaryKey(scannedResult.getDictionaryKeyArray());
+      wrapper.setNoDictionaryKeys(scannedResult.getNoDictionaryKeyArray());
+      wrapper.setComplexTypesKeys(scannedResult.getComplexTypeKeyArray());
+      row[0] = wrapper;
+      fillMeasureData(row, 1, scannedResult);
+      listBasedResult.add(row);
+      rowCounter++;
+    }
+    updateData(listBasedResult);
+    return listBasedResult;
+  }
+}
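
Unlike the dictionary-based collector, this one leaves the keys undecoded: slot 0
of each row carries the wrapped key bytes and the measures fill the remaining
slots, so row.length is 1 + queryMeasures.length. A sketch of that row shape with
a stripped-down stand-in for ByteArrayWrapper (the wrapper class below is
hypothetical, only to show the layout):

    public class RawRowSketch {
      // minimal stand-in for org.apache.carbondata.scan.wrappers.ByteArrayWrapper
      static class KeyWrapper {
        byte[] dictionaryKey;
        byte[][] noDictionaryKeys;
      }

      public static void main(String[] args) {
        KeyWrapper wrapper = new KeyWrapper();
        wrapper.dictionaryKey = new byte[] { 0, 0, 0, 5 };
        wrapper.noDictionaryKeys = new byte[][] { "abc".getBytes() };
        int measureCount = 2;
        Object[] row = new Object[1 + measureCount];
        row[0] = wrapper; // undecoded keys stay in slot 0
        row[1] = 42.0;    // measure values fill the rest
        row[2] = 7L;
        System.out.println("columns in row: " + row.length); // 3
      }
    }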

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/complextypes/ArrayQueryType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/complextypes/ArrayQueryType.java b/core/src/main/java/org/apache/carbondata/scan/complextypes/ArrayQueryType.java
new file mode 100644
index 0000000..8dd6749
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/complextypes/ArrayQueryType.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.complextypes;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.scan.filter.GenericQueryType;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+import org.apache.spark.sql.catalyst.util.GenericArrayData;
+import org.apache.spark.sql.types.ArrayType;
+import org.apache.spark.sql.types.DataType;
+
+public class ArrayQueryType extends ComplexQueryType implements GenericQueryType {
+
+  private GenericQueryType children;
+  private int keyOrdinalForQuery;
+
+  public ArrayQueryType(String name, String parentname, int blockIndex) {
+    super(name, parentname, blockIndex);
+  }
+
+  @Override public void addChildren(GenericQueryType children) {
+    if (this.getName().equals(children.getParentname())) {
+      this.children = children;
+    } else {
+      this.children.addChildren(children);
+    }
+  }
+
+  @Override public String getName() {
+    return name;
+  }
+
+  @Override public void setName(String name) {
+    this.name = name;
+  }
+
+  @Override public String getParentname() {
+    return parentname;
+  }
+
+  @Override public void setParentname(String parentname) {
+    this.parentname = parentname;
+  }
+
+  @Override public void getAllPrimitiveChildren(List<GenericQueryType> primitiveChild) {
+    if (children instanceof PrimitiveQueryType) {
+      primitiveChild.add(children);
+    } else {
+      children.getAllPrimitiveChildren(primitiveChild);
+    }
+  }
+
+  public void parseBlocksAndReturnComplexColumnByteArray(
+      DimensionColumnDataChunk[] dimensionColumnDataChunks, int rowNumber,
+      DataOutputStream dataOutputStream) throws IOException {
+    byte[] input = new byte[8];
+    copyBlockDataChunk(dimensionColumnDataChunks, rowNumber, input);
+    ByteBuffer byteArray = ByteBuffer.wrap(input);
+    int dataLength = byteArray.getInt();
+    dataOutputStream.writeInt(dataLength);
+    if (dataLength != 0) {
+      int columnIndex = byteArray.getInt();
+      for (int i = 0; i < dataLength; i++) {
+        children
+            .parseBlocksAndReturnComplexColumnByteArray(dimensionColumnDataChunks, columnIndex++,
+                dataOutputStream);
+      }
+    }
+  }
+
+  @Override public int getSurrogateIndex() {
+    return 0;
+  }
+
+  @Override public void setSurrogateIndex(int surrIndex) {
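+    // an array column does not track a surrogate index; only primitive children do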
+  }
+
+  @Override public int getBlockIndex() {
+    return blockIndex;
+  }
+
+  @Override public void setBlockIndex(int blockIndex) {
+    this.blockIndex = blockIndex;
+  }
+
+  @Override public int getColsCount() {
+    return children.getColsCount() + 1;
+  }
+
+  @Override public void parseAndGetResultBytes(ByteBuffer complexData, DataOutputStream dataOutput)
+      throws IOException {
+    int dataLength = complexData.getInt();
+    dataOutput.writeInt(dataLength);
+    for (int i = 0; i < dataLength; i++) {
+      children.parseAndGetResultBytes(complexData, dataOutput);
+    }
+  }
+
+  @Override public void setKeySize(int[] keyBlockSize) {
+    children.setKeySize(keyBlockSize);
+  }
+
+  @Override public DataType getSchemaType() {
+    return new ArrayType(null, true);
+  }
+
+  @Override public int getKeyOrdinalForQuery() {
+    return keyOrdinalForQuery;
+  }
+
+  @Override public void setKeyOrdinalForQuery(int keyOrdinalForQuery) {
+    this.keyOrdinalForQuery = keyOrdinalForQuery;
+  }
+
+  @Override public void fillRequiredBlockData(BlocksChunkHolder blockChunkHolder) {
+    readBlockDataChunk(blockChunkHolder);
+    children.fillRequiredBlockData(blockChunkHolder);
+  }
+
+  @Override public Object getDataBasedOnDataTypeFromSurrogates(ByteBuffer surrogateData) {
+    int dataLength = surrogateData.getInt();
+    if (dataLength == -1) {
+      return null;
+    }
+    Object[] data = new Object[dataLength];
+    for (int i = 0; i < dataLength; i++) {
+      data[i] = children.getDataBasedOnDataTypeFromSurrogates(surrogateData);
+    }
+    return new GenericArrayData(data);
+  }
+
+}
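
getDataBasedOnDataTypeFromSurrogates above shows the framing of an array value: an
int element count (with -1 marking null) followed by the children's encodings. A
self-contained sketch of decoding that length-prefixed layout for an array of ints
(the element type is hypothetical; a real child delegates to its own decoder):

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    public class ArrayDecodeSketch {
      public static void main(String[] args) {
        // encode: element count, then one int per element
        ByteBuffer buf = ByteBuffer.allocate(4 + 3 * 4);
        buf.putInt(3).putInt(10).putInt(20).putInt(30);
        buf.flip();

        int count = buf.getInt();
        if (count == -1) { // -1 encodes a null array
          System.out.println("null array");
          return;
        }
        Object[] data = new Object[count];
        for (int i = 0; i < count; i++) {
          data[i] = buf.getInt();
        }
        System.out.println(Arrays.toString(data)); // [10, 20, 30]
      }
    }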

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/complextypes/ComplexQueryType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/complextypes/ComplexQueryType.java b/core/src/main/java/org/apache/carbondata/scan/complextypes/ComplexQueryType.java
new file mode 100644
index 0000000..0a4c999
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/complextypes/ComplexQueryType.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.complextypes;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+public class ComplexQueryType {
+  protected String name;
+
+  protected String parentname;
+
+  protected int blockIndex;
+
+  public ComplexQueryType(String name, String parentname, int blockIndex) {
+    this.name = name;
+    this.parentname = parentname;
+    this.blockIndex = blockIndex;
+  }
+
+  /**
+   * Method will copy one column value from the block chunk holder data to
+   * the passed byte[]; this method is also used by the child types.
+   *
+   * @param dimensionColumnDataChunks chunks to copy from
+   * @param rowNumber                 row whose value should be copied
+   * @param input                     destination buffer
+   */
+  protected void copyBlockDataChunk(DimensionColumnDataChunk[] dimensionColumnDataChunks,
+      int rowNumber, byte[] input) {
+    byte[] data = (byte[]) dimensionColumnDataChunks[blockIndex].getCompleteDataChunk();
+    if (null != dimensionColumnDataChunks[blockIndex].getAttributes().getInvertedIndexes()) {
+      System.arraycopy(data, dimensionColumnDataChunks[blockIndex].getAttributes()
+              .getInvertedIndexesReverse()[rowNumber] * dimensionColumnDataChunks[blockIndex]
+              .getAttributes().getColumnValueSize(), input, 0,
+          dimensionColumnDataChunks[blockIndex].getAttributes().getColumnValueSize());
+    } else {
+      System.arraycopy(data,
+          rowNumber * dimensionColumnDataChunks[blockIndex].getAttributes().getColumnValueSize(),
+          input, 0, dimensionColumnDataChunks[blockIndex].getAttributes().getColumnValueSize());
+    }
+  }
+
+  /*
+   * This method will read the block data chunk from the respective block
+   */
+  protected void readBlockDataChunk(BlocksChunkHolder blockChunkHolder) {
+    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
+      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
+          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
+    }
+  }
+}
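
copyBlockDataChunk picks one fixed-size value out of a flat column chunk; when the
chunk carries an inverted index, the logical row number is first mapped through
the reverse index to the physical position. A standalone sketch of that lookup
(chunk contents and index values below are made up):

    public class ChunkCopySketch {
      public static void main(String[] args) {
        int valueSize = 4;
        // three 4-byte values stored physically in sorted order: B, C, A
        byte[] chunk = { 'B', 'B', 'B', 'B', 'C', 'C', 'C', 'C', 'A', 'A', 'A', 'A' };
        // reverse inverted index: logical row -> physical position in the chunk
        int[] invertedIndexReverse = { 2, 0, 1 };
        int rowNumber = 0; // logical row 0 is physically last
        byte[] out = new byte[valueSize];
        System.arraycopy(chunk, invertedIndexReverse[rowNumber] * valueSize, out, 0, valueSize);
        System.out.println(new String(out)); // AAAA
      }
    }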

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/complextypes/PrimitiveQueryType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/complextypes/PrimitiveQueryType.java b/core/src/main/java/org/apache/carbondata/scan/complextypes/PrimitiveQueryType.java
new file mode 100644
index 0000000..11f4651
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/complextypes/PrimitiveQueryType.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.complextypes;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
+import org.apache.carbondata.core.keygenerator.mdkey.Bits;
+import org.apache.carbondata.core.util.DataTypeUtil;
+import org.apache.carbondata.scan.filter.GenericQueryType;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+import org.apache.spark.sql.types.BooleanType$;
+import org.apache.spark.sql.types.DataType;
+import org.apache.spark.sql.types.DoubleType$;
+import org.apache.spark.sql.types.IntegerType$;
+import org.apache.spark.sql.types.LongType$;
+import org.apache.spark.sql.types.TimestampType$;
+
+public class PrimitiveQueryType extends ComplexQueryType implements GenericQueryType {
+
+  private int index;
+
+  private int keySize;
+
+  private Dictionary dictionary;
+
+  private org.apache.carbondata.core.carbon.metadata.datatype.DataType dataType;
+
+  private boolean isDirectDictionary;
+
+  public PrimitiveQueryType(String name, String parentname, int blockIndex,
+      org.apache.carbondata.core.carbon.metadata.datatype.DataType dataType, int keySize,
+      Dictionary dictionary, boolean isDirectDictionary) {
+    super(name, parentname, blockIndex);
+    this.dataType = dataType;
+    this.keySize = keySize;
+    this.dictionary = dictionary;
+    this.isDirectDictionary = isDirectDictionary;
+  }
+
+  @Override public void addChildren(GenericQueryType children) {
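+    // a primitive column is a leaf, so there are no children to add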
+  }
+
+  @Override public String getName() {
+    return name;
+  }
+
+  @Override public void setName(String name) {
+    this.name = name;
+  }
+
+  @Override public String getParentname() {
+    return parentname;
+  }
+
+  @Override public void setParentname(String parentname) {
+    this.parentname = parentname;
+
+  }
+
+  @Override public void getAllPrimitiveChildren(List<GenericQueryType> primitiveChild) {
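+    // a primitive column is itself a leaf; there are no children to collect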
+  }
+
+  @Override public int getSurrogateIndex() {
+    return index;
+  }
+
+  @Override public void setSurrogateIndex(int surrIndex) {
+    index = surrIndex;
+  }
+
+  @Override public int getBlockIndex() {
+    return blockIndex;
+  }
+
+  @Override public void setBlockIndex(int blockIndex) {
+    this.blockIndex = blockIndex;
+  }
+
+  @Override public int getColsCount() {
+    return 1;
+  }
+
+  @Override public void parseBlocksAndReturnComplexColumnByteArray(
+      DimensionColumnDataChunk[] dimensionDataChunks, int rowNumber,
+      DataOutputStream dataOutputStream) throws IOException {
+    byte[] currentVal =
+        new byte[dimensionDataChunks[blockIndex].getAttributes().getColumnValueSize()];
+    copyBlockDataChunk(dimensionDataChunks, rowNumber, currentVal);
+    dataOutputStream.write(currentVal);
+  }
+
+  @Override public void setKeySize(int[] keyBlockSize) {
+    this.keySize = keyBlockSize[this.blockIndex];
+  }
+
+  @Override public void parseAndGetResultBytes(ByteBuffer complexData, DataOutputStream dataOutput)
+      throws IOException {
+  }
+
+  @Override public DataType getSchemaType() {
+    switch (dataType) {
+      case INT:
+        return IntegerType$.MODULE$;
+      case DOUBLE:
+        return DoubleType$.MODULE$;
+      case LONG:
+        return LongType$.MODULE$;
+      case BOOLEAN:
+        return BooleanType$.MODULE$;
+      case TIMESTAMP:
+        return TimestampType$.MODULE$;
+      default:
+        return IntegerType$.MODULE$;
+    }
+  }
+
+  @Override public int getKeyOrdinalForQuery() {
+    return 0;
+  }
+
+  @Override public void setKeyOrdinalForQuery(int keyOrdinalForQuery) {
+  }
+
+  @Override public void fillRequiredBlockData(BlocksChunkHolder blockChunkHolder) {
+    readBlockDataChunk(blockChunkHolder);
+  }
+
+  @Override public Object getDataBasedOnDataTypeFromSurrogates(ByteBuffer surrogateData) {
+    byte[] data = new byte[keySize];
+    surrogateData.get(data);
+    Bits bit = new Bits(new int[] { keySize * 8 });
+    int surrogateValue = (int) bit.getKeyArray(data, 0)[0];
+    Object actualData = null;
+    if (isDirectDictionary) {
+      DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
+          .getDirectDictionaryGenerator(dataType);
+      actualData = directDictionaryGenerator.getValueFromSurrogate(surrogateValue);
+    } else {
+      String dictionaryValueForKey = dictionary.getDictionaryValueForKey(surrogateValue);
+      actualData = DataTypeUtil.getDataBasedOnDataType(dictionaryValueForKey, this.dataType);
+    }
+    return actualData;
+  }
+}
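
getDataBasedOnDataTypeFromSurrogates reads keySize bytes and converts them to an
int surrogate through Bits before the dictionary lookup. For keys that span whole
bytes this amounts to a big-endian accumulation, sketched below (the equivalence
is an assumption for illustration; Bits also handles bit-packed keys):

    import java.nio.ByteBuffer;

    public class SurrogateSketch {
      public static void main(String[] args) {
        int keySize = 3; // a 3-byte dictionary key
        ByteBuffer surrogateData = ByteBuffer.wrap(new byte[] { 0, 1, 44 });
        byte[] data = new byte[keySize];
        surrogateData.get(data);
        int surrogate = 0;
        for (byte b : data) {
          // big-endian accumulation over keySize * 8 bits
          surrogate = (surrogate << 8) | (b & 0xFF);
        }
        // 300 would then feed dictionary.getDictionaryValueForKey(300)
        System.out.println(surrogate); // 300
      }
    }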

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/complextypes/StructQueryType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/complextypes/StructQueryType.java b/core/src/main/java/org/apache/carbondata/scan/complextypes/StructQueryType.java
new file mode 100644
index 0000000..a8b188b
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/complextypes/StructQueryType.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.complextypes;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.scan.filter.GenericQueryType;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+import org.apache.spark.sql.catalyst.expressions.GenericInternalRowWithSchema;
+import org.apache.spark.sql.types.DataType;
+import org.apache.spark.sql.types.Metadata;
+import org.apache.spark.sql.types.StructField;
+import org.apache.spark.sql.types.StructType;
+
+public class StructQueryType extends ComplexQueryType implements GenericQueryType {
+
+  private List<GenericQueryType> children = new ArrayList<GenericQueryType>();
+  private int keyOrdinalForQuery;
+
+  public StructQueryType(String name, String parentname, int blockIndex) {
+    super(name, parentname, blockIndex);
+  }
+
+  @Override public void addChildren(GenericQueryType newChild) {
+    if (this.getName().equals(newChild.getParentname())) {
+      this.children.add(newChild);
+    } else {
+      for (GenericQueryType child : this.children) {
+        child.addChildren(newChild);
+      }
+    }
+  }
+
+  @Override public String getName() {
+    return name;
+  }
+
+  @Override public void setName(String name) {
+    this.name = name;
+  }
+
+  @Override public String getParentname() {
+    return parentname;
+  }
+
+  @Override public void setParentname(String parentname) {
+    this.parentname = parentname;
+  }
+
+  @Override public void getAllPrimitiveChildren(List<GenericQueryType> primitiveChild) {
+    for (int i = 0; i < children.size(); i++) {
+      GenericQueryType child = children.get(i);
+      if (child instanceof PrimitiveQueryType) {
+        primitiveChild.add(child);
+      } else {
+        child.getAllPrimitiveChildren(primitiveChild);
+      }
+    }
+  }
+
+  @Override public int getSurrogateIndex() {
+    return 0;
+  }
+
+  @Override public void setSurrogateIndex(int surrIndex) {
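+    // a struct column does not track a surrogate index; only primitive children do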
+  }
+
+  @Override public int getBlockIndex() {
+    return blockIndex;
+  }
+
+  @Override public void setBlockIndex(int blockIndex) {
+    this.blockIndex = blockIndex;
+  }
+
+  @Override public int getColsCount() {
+    int colsCount = 1;
+    for (int i = 0; i < children.size(); i++) {
+      colsCount += children.get(i).getColsCount();
+    }
+    return colsCount;
+  }
+
+  @Override public void parseBlocksAndReturnComplexColumnByteArray(
+      DimensionColumnDataChunk[] dimensionColumnDataChunks, int rowNumber,
+      DataOutputStream dataOutputStream) throws IOException {
+    byte[] input = new byte[8];
+    copyBlockDataChunk(dimensionColumnDataChunks, rowNumber, input);
+    ByteBuffer byteArray = ByteBuffer.wrap(input);
+    int childElement = byteArray.getInt();
+    dataOutputStream.writeInt(childElement);
+    if (childElement != 0) {
+      for (int i = 0; i < childElement; i++) {
+        children.get(i)
+            .parseBlocksAndReturnComplexColumnByteArray(dimensionColumnDataChunks, rowNumber,
+                dataOutputStream);
+      }
+    }
+  }
+
+  @Override public void parseAndGetResultBytes(ByteBuffer complexData, DataOutputStream dataOutput)
+      throws IOException {
+    int childElement = complexData.getInt();
+    dataOutput.writeInt(childElement);
+    for (int i = 0; i < childElement; i++) {
+      children.get(i).parseAndGetResultBytes(complexData, dataOutput);
+    }
+  }
+
+  @Override public void setKeySize(int[] keyBlockSize) {
+    for (int i = 0; i < children.size(); i++) {
+      children.get(i).setKeySize(keyBlockSize);
+    }
+  }
+
+  @Override public DataType getSchemaType() {
+    StructField[] fields = new StructField[children.size()];
+    for (int i = 0; i < children.size(); i++) {
+      fields[i] = new StructField(children.get(i).getName(), null, true,
+          Metadata.empty());
+    }
+    return new StructType(fields);
+  }
+
+  @Override public int getKeyOrdinalForQuery() {
+    return keyOrdinalForQuery;
+  }
+
+  @Override public void setKeyOrdinalForQuery(int keyOrdinalForQuery) {
+    this.keyOrdinalForQuery = keyOrdinalForQuery;
+  }
+
+  @Override public void fillRequiredBlockData(BlocksChunkHolder blockChunkHolder) {
+    readBlockDataChunk(blockChunkHolder);
+
+    for (int i = 0; i < children.size(); i++) {
+      children.get(i).fillRequiredBlockData(blockChunkHolder);
+    }
+  }
+
+  @Override public Object getDataBasedOnDataTypeFromSurrogates(ByteBuffer surrogateData) {
+    int childLength = surrogateData.getInt();
+    Object[] fields = new Object[childLength];
+    for (int i = 0; i < childLength; i++) {
+      fields[i] =  children.get(i).getDataBasedOnDataTypeFromSurrogates(surrogateData);
+    }
+
+    return new GenericInternalRowWithSchema(fields, (StructType) getSchemaType());
+  }
+}
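
A struct contributes one column for itself plus everything its children
contribute, which is exactly what the recursive getColsCount above computes;
arrays do the same with a single child. A tiny standalone model of that tree walk
(the Node class is hypothetical):

    import java.util.ArrayList;
    import java.util.List;

    public class ColsCountSketch {
      static class Node {
        final List<Node> children = new ArrayList<>();

        int colsCount() {
          int count = 1; // the complex column itself
          for (Node child : children) {
            count += child.colsCount(); // plus all nested columns
          }
          return count;
        }
      }

      public static void main(String[] args) {
        Node struct = new Node();        // struct<a, b: struct<c>>
        struct.children.add(new Node()); // a
        Node b = new Node();
        b.children.add(new Node());      // c
        struct.children.add(b);
        System.out.println(struct.colsCount()); // 4
      }
    }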

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/executor/QueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/executor/QueryExecutor.java b/core/src/main/java/org/apache/carbondata/scan/executor/QueryExecutor.java
new file mode 100644
index 0000000..53bf8ca
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/executor/QueryExecutor.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.executor;
+
+import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.model.QueryModel;
+
+/**
+ * Interface for carbon query executor.
+ * Will be used to execute the query based on the query model
+ * and will return an iterator over the query result
+ */
+public interface QueryExecutor<E> {
+
+  /**
+   * Below method will be used to execute the query based on query model passed from driver
+   *
+   * @param queryModel query details
+   * @return query result iterator
+   * @throws QueryExecutionException if any failure while executing the query
+   */
+  CarbonIterator<E> execute(QueryModel queryModel) throws QueryExecutionException;
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/executor/QueryExecutorFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/executor/QueryExecutorFactory.java b/core/src/main/java/org/apache/carbondata/scan/executor/QueryExecutorFactory.java
new file mode 100644
index 0000000..ab75231
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/executor/QueryExecutorFactory.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.executor;
+
+import org.apache.carbondata.scan.executor.impl.DetailQueryExecutor;
+import org.apache.carbondata.scan.model.QueryModel;
+
+/**
+ * Factory class to get the query executor from the RDD.
+ * This will return the executor based on the query type.
+ */
+public class QueryExecutorFactory {
+
+  public static QueryExecutor getQueryExecutor(QueryModel queryModel) {
+    return new DetailQueryExecutor();
+  }
+}
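
Putting the interface and factory together, a caller obtains an executor and
iterates the resulting rows; the factory currently always returns a
DetailQueryExecutor. A hedged usage sketch (it assumes the classes from this
patch are on the classpath and that queryModel has already been built and
resolved; error handling is elided):

    import org.apache.carbondata.common.CarbonIterator;
    import org.apache.carbondata.scan.executor.QueryExecutor;
    import org.apache.carbondata.scan.executor.QueryExecutorFactory;
    import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
    import org.apache.carbondata.scan.model.QueryModel;

    public class ExecutorUsageSketch {
      @SuppressWarnings("unchecked")
      static void runQuery(QueryModel queryModel) throws QueryExecutionException {
        QueryExecutor<Object[]> executor = QueryExecutorFactory.getQueryExecutor(queryModel);
        CarbonIterator<Object[]> rows = executor.execute(queryModel);
        while (rows.hasNext()) {
          Object[] row = rows.next();
          // dimensions come first, then measures, in query order
        }
      }
    }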

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/executor/exception/QueryExecutionException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/executor/exception/QueryExecutionException.java b/core/src/main/java/org/apache/carbondata/scan/executor/exception/QueryExecutionException.java
new file mode 100644
index 0000000..7003184
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/executor/exception/QueryExecutionException.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.executor.exception;
+
+import java.util.Locale;
+
+/**
+ * Exception class for query execution
+ */
+public class QueryExecutionException extends Exception {
+
+  /**
+   * default serial version ID.
+   */
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * The Error message.
+   */
+  private String msg = "";
+
+  /**
+   * Constructor
+   *
+   * @param msg The error message for this exception.
+   */
+  public QueryExecutionException(String msg) {
+    super(msg);
+    this.msg = msg;
+  }
+
+  /**
+   * Constructor
+   *
+   * @param msg The error message for this exception.
+   * @param t   The cause of this exception.
+   */
+  public QueryExecutionException(String msg, Throwable t) {
+    super(msg, t);
+    this.msg = msg;
+  }
+
+  /**
+   * Constructor
+   *
+   * @param t The cause of this exception.
+   */
+  public QueryExecutionException(Throwable t) {
+    super(t);
+  }
+
+  /**
+   * This method is used to get the localized message.
+   *
+   * @param locale - A Locale object represents a specific geographical,
+   *               political, or cultural region.
+   * @return - Localized error message (currently always the empty string).
+   */
+  public String getLocalizedMessage(Locale locale) {
+    return "";
+  }
+
+  /**
+   * getLocalizedMessage
+   */
+  @Override public String getLocalizedMessage() {
+    return super.getLocalizedMessage();
+  }
+
+  /**
+   * getMessage
+   */
+  public String getMessage() {
+    return this.msg;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/executor/impl/AbstractQueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/executor/impl/AbstractQueryExecutor.java b/core/src/main/java/org/apache/carbondata/scan/executor/impl/AbstractQueryExecutor.java
new file mode 100644
index 0000000..e204572
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/executor/impl/AbstractQueryExecutor.java
@@ -0,0 +1,412 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.executor.impl;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.common.logging.impl.StandardLogService;
+import org.apache.carbondata.core.carbon.datastore.BlockIndexStore;
+import org.apache.carbondata.core.carbon.datastore.IndexKey;
+import org.apache.carbondata.core.carbon.datastore.block.AbstractIndex;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.datastore.exception.IndexBuilderException;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
+import org.apache.carbondata.core.carbon.querystatistics.QueryStatistic;
+import org.apache.carbondata.core.carbon.querystatistics.QueryStatisticsRecorder;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+import org.apache.carbondata.core.keygenerator.KeyGenerator;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.scan.executor.QueryExecutor;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.executor.infos.AggregatorInfo;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.executor.infos.KeyStructureInfo;
+import org.apache.carbondata.scan.executor.util.QueryUtil;
+import org.apache.carbondata.scan.executor.util.RestructureUtil;
+import org.apache.carbondata.scan.filter.FilterUtil;
+import org.apache.carbondata.scan.model.QueryDimension;
+import org.apache.carbondata.scan.model.QueryMeasure;
+import org.apache.carbondata.scan.model.QueryModel;
+
+import org.apache.commons.lang3.ArrayUtils;
+
+/**
+ * This class provides a skeletal implementation of the {@link QueryExecutor}
+ * interface to minimize the effort required to implement this interface. This
+ * will be used to prepare all the properties required for query execution
+ */
+public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(AbstractQueryExecutor.class.getName());
+  /**
+   * holder for query properties which will be used to execute the query
+   */
+  protected QueryExecutorProperties queryProperties;
+
+  public AbstractQueryExecutor() {
+    queryProperties = new QueryExecutorProperties();
+  }
+
+  /**
+   * Below method will be used to fill the executor properties based on the
+   * query model; it will parse the query model, get the details and fill
+   * them into the query properties
+   *
+   * @param queryModel
+   */
+  protected void initQuery(QueryModel queryModel) throws QueryExecutionException {
+    StandardLogService.setThreadName(StandardLogService.getPartitionID(
+        queryModel.getAbsoluteTableIdentifier().getCarbonTableIdentifier().getTableName()),
+        queryModel.getQueryId());
+    LOGGER.info("Query will be executed on table: " + queryModel.getAbsoluteTableIdentifier()
+        .getCarbonTableIdentifier().getTableName());
+    // Initializing statistics list to record the query statistics
+    // creating copy on write to handle concurrent scenario
+    queryProperties.queryStatisticsRecorder = new QueryStatisticsRecorder(queryModel.getQueryId());
+    queryModel.setStatisticsRecorder(queryProperties.queryStatisticsRecorder);
+    QueryUtil.resolveQueryModel(queryModel);
+    QueryStatistic queryStatistic = new QueryStatistic();
+    // get the table blocks
+    try {
+      queryProperties.dataBlocks = BlockIndexStore.getInstance()
+          .loadAndGetBlocks(queryModel.getTableBlockInfos(),
+              queryModel.getAbsoluteTableIdentifier());
+    } catch (IndexBuilderException e) {
+      throw new QueryExecutionException(e);
+    }
+    queryStatistic
+        .addStatistics("Time taken to load the Block(s) In Executor", System.currentTimeMillis());
+    queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);
+    // updating the restructuring infos for the query
+    queryProperties.keyStructureInfo = getKeyStructureInfo(queryModel,
+        queryProperties.dataBlocks.get(queryProperties.dataBlocks.size() - 1).getSegmentProperties()
+            .getDimensionKeyGenerator());
+
+    // calculating the total number of aggregated columns
+    int aggTypeCount = queryModel.getQueryMeasures().size();
+
+    int currentIndex = 0;
+    String[] aggTypes = new String[aggTypeCount];
+    DataType[] dataTypes = new DataType[aggTypeCount];
+
+    for (QueryMeasure carbonMeasure : queryModel.getQueryMeasures()) {
+      // adding the data type and aggregation type of all the measure this
+      // can be used
+      // to select the aggregator
+      aggTypes[currentIndex] = carbonMeasure.getAggregateFunction();
+      dataTypes[currentIndex] = carbonMeasure.getMeasure().getDataType();
+      currentIndex++;
+    }
+    queryProperties.measureDataTypes = dataTypes;
+    // as aggregation will be executed in following order
+    // 1.aggregate dimension expression
+    // 2. expression
+    // 3. query measure
+    // so calculating the index of the expression start index
+    // and measure column start index
+    queryProperties.aggExpressionStartIndex = queryModel.getQueryMeasures().size();
+    queryProperties.measureStartIndex = aggTypes.length - queryModel.getQueryMeasures().size();
+
+    queryProperties.complexFilterDimension =
+        QueryUtil.getAllFilterDimensions(queryModel.getFilterExpressionResolverTree());
+    queryStatistic = new QueryStatistic();
+    // dictionary column unique column id to dictionary mapping
+    // which will be used to get column actual data
+    queryProperties.columnToDictionayMapping = QueryUtil
+        .getDimensionDictionaryDetail(queryModel.getQueryDimension(),
+            queryProperties.complexFilterDimension, queryModel.getAbsoluteTableIdentifier());
+    queryStatistic
+        .addStatistics("Time taken to load the Dictionary In Executor", System.currentTimeMillis());
+    queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);
+    queryModel.setColumnToDictionaryMapping(queryProperties.columnToDictionayMapping);
+    // setting the sort dimension index. as it will be updated while getting the sort info
+    // so currently setting it to default 0 means sort is not present in any dimension
+    queryProperties.sortDimIndexes = new byte[queryModel.getQueryDimension().size()];
+  }
+
+  /**
+   * Below method will be used to get the key structure info for the query
+   *
+   * @param queryModel   query model
+   * @param keyGenerator
+   * @return key structure info
+   */
+  private KeyStructureInfo getKeyStructureInfo(QueryModel queryModel, KeyGenerator keyGenerator) {
+    // getting the masked byte range for dictionary column
+    int[] maskByteRanges =
+        QueryUtil.getMaskedByteRange(queryModel.getQueryDimension(), keyGenerator);
+
+    // getting the masked bytes for query dimension dictionary column
+    int[] maskedBytes = QueryUtil.getMaskedByte(keyGenerator.getKeySizeInBytes(), maskByteRanges);
+
+    // max key for the dictionary dimension present in the query
+    byte[] maxKey = null;
+    try {
+      // getting the max key which will be used to mask and get the
+      // masked key
+      maxKey = QueryUtil.getMaxKeyBasedOnDimensions(queryModel.getQueryDimension(), keyGenerator);
+    } catch (KeyGenException e) {
+      LOGGER.error(e, "problem while getting the max key");
+    }
+
+    KeyStructureInfo restructureInfos = new KeyStructureInfo();
+    restructureInfos.setKeyGenerator(keyGenerator);
+    restructureInfos.setMaskByteRanges(maskByteRanges);
+    restructureInfos.setMaskedBytes(maskedBytes);
+    restructureInfos.setMaxKey(maxKey);
+    return restructureInfos;
+  }
+
+  protected List<BlockExecutionInfo> getBlockExecutionInfos(QueryModel queryModel)
+      throws QueryExecutionException {
+    initQuery(queryModel);
+    List<BlockExecutionInfo> blockExecutionInfoList = new ArrayList<BlockExecutionInfo>();
+    // fill the block execution info for every block selected in the query;
+    // the query will be executed based on those infos
+    for (int i = 0; i < queryProperties.dataBlocks.size(); i++) {
+      blockExecutionInfoList
+          .add(getBlockExecutionInfoForBlock(queryModel, queryProperties.dataBlocks.get(i)));
+    }
+    queryProperties.complexDimensionInfoMap =
+        blockExecutionInfoList.get(blockExecutionInfoList.size() - 1).getComlexDimensionInfoMap();
+    return blockExecutionInfoList;
+  }
+
+  /**
+   * Below method will be used to get the block execution info which is
+   * required to execute any block  based on query model
+   *
+   * @param queryModel query model from user query
+   * @param blockIndex block index
+   * @return block execution info
+   * @throws QueryExecutionException any failure during block info creation
+   */
+  protected BlockExecutionInfo getBlockExecutionInfoForBlock(QueryModel queryModel,
+      AbstractIndex blockIndex) throws QueryExecutionException {
+    BlockExecutionInfo blockExecutionInfo = new BlockExecutionInfo();
+    SegmentProperties segmentProperties = blockIndex.getSegmentProperties();
+    List<CarbonDimension> tableBlockDimensions = segmentProperties.getDimensions();
+    KeyGenerator blockKeyGenerator = segmentProperties.getDimensionKeyGenerator();
+
+    // below is to get only those dimensions in the query which are present
+    // in the table block
+    List<QueryDimension> updatedQueryDimension = RestructureUtil
+        .getUpdatedQueryDimension(queryModel.getQueryDimension(), tableBlockDimensions,
+            segmentProperties.getComplexDimensions());
+    // TODO add complex dimension children
+    int[] maskByteRangesForBlock =
+        QueryUtil.getMaskedByteRange(updatedQueryDimension, blockKeyGenerator);
+    int[] maskedByte =
+        QueryUtil.getMaskedByte(blockKeyGenerator.getKeySizeInBytes(), maskByteRangesForBlock);
+    blockExecutionInfo.setQueryDimensions(
+        updatedQueryDimension.toArray(new QueryDimension[updatedQueryDimension.size()]));
+    blockExecutionInfo.setQueryMeasures(queryModel.getQueryMeasures()
+        .toArray(new QueryMeasure[queryModel.getQueryMeasures().size()]));
+    blockExecutionInfo.setDataBlock(blockIndex);
+    blockExecutionInfo.setBlockKeyGenerator(blockKeyGenerator);
+    // adding aggregation info for query
+    blockExecutionInfo.setAggregatorInfo(getAggregatorInfoForBlock(queryModel, blockIndex));
+    // adding query statistics list to record the statistics
+    blockExecutionInfo.setStatisticsRecorder(queryProperties.queryStatisticsRecorder);
+    // setting the limit
+    blockExecutionInfo.setLimit(queryModel.getLimit());
+    // setting whether detail query or not
+    blockExecutionInfo.setDetailQuery(queryModel.isDetailQuery());
+    // setting whether raw record query or not
+    blockExecutionInfo.setRawRecordDetailQuery(queryModel.isForcedDetailRawQuery());
+    // setting the masked bytes of the block which will be
+    // used to unpack the older block keys
+    blockExecutionInfo.setMaskedByteForBlock(maskedByte);
+    // total number of dimension and measure blocks
+    blockExecutionInfo
+        .setTotalNumberDimensionBlock(segmentProperties.getDimensionOrdinalToBlockMapping().size());
+    blockExecutionInfo
+        .setTotalNumberOfMeasureBlock(segmentProperties.getMeasuresOrdinalToBlockMapping().size());
+    blockExecutionInfo.setComplexDimensionInfoMap(QueryUtil
+        .getComplexDimensionsMap(updatedQueryDimension,
+            segmentProperties.getDimensionOrdinalToBlockMapping(),
+            segmentProperties.getEachComplexDimColumnValueSize(),
+            queryProperties.columnToDictionayMapping, queryProperties.complexFilterDimension));
+    // to check whether older block key update is required or not
+    blockExecutionInfo.setFixedKeyUpdateRequired(
+        !blockKeyGenerator.equals(queryProperties.keyStructureInfo.getKeyGenerator()));
+    IndexKey startIndexKey = null;
+    IndexKey endIndexKey = null;
+    if (null != queryModel.getFilterExpressionResolverTree()) {
+      // loading the filter executer tree for filter evaluation
+      blockExecutionInfo.setFilterExecuterTree(FilterUtil
+          .getFilterExecuterTree(queryModel.getFilterExpressionResolverTree(), segmentProperties,
+              blockExecutionInfo.getComlexDimensionInfoMap()));
+      List<IndexKey> listOfStartEndKeys = new ArrayList<IndexKey>(2);
+      FilterUtil.traverseResolverTreeAndGetStartAndEndKey(segmentProperties,
+          queryModel.getAbsoluteTableIdentifier(), queryModel.getFilterExpressionResolverTree(),
+          listOfStartEndKeys);
+      startIndexKey = listOfStartEndKeys.get(0);
+      endIndexKey = listOfStartEndKeys.get(1);
+    } else {
+      try {
+        startIndexKey = FilterUtil.prepareDefaultStartIndexKey(segmentProperties);
+        endIndexKey = FilterUtil.prepareDefaultEndIndexKey(segmentProperties);
+      } catch (KeyGenException e) {
+        throw new QueryExecutionException(e);
+      }
+    }
+    blockExecutionInfo.setFileType(
+        FileFactory.getFileType(queryModel.getAbsoluteTableIdentifier().getStorePath()));
+    //setting the start index key of the block node
+    blockExecutionInfo.setStartKey(startIndexKey);
+    //setting the end index key of the block node
+    blockExecutionInfo.setEndKey(endIndexKey);
+    // expression dimensions
+    List<CarbonDimension> expressionDimensions =
+        new ArrayList<CarbonDimension>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    // expression measure
+    List<CarbonMeasure> expressionMeasures =
+        new ArrayList<CarbonMeasure>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    // setting all the dimension chunk indexes to be read from file
+    blockExecutionInfo.setAllSelectedDimensionBlocksIndexes(QueryUtil
+        .getDimensionsBlockIndexes(updatedQueryDimension,
+            segmentProperties.getDimensionOrdinalToBlockMapping(), expressionDimensions));
+    // setting all the measure chunk indexes to be read from file
+    blockExecutionInfo.setAllSelectedMeasureBlocksIndexes(QueryUtil
+        .getMeasureBlockIndexes(queryModel.getQueryMeasures(), expressionMeasures,
+            segmentProperties.getMeasuresOrdinalToBlockMapping()));
+    // setting the key structure info which will be required
+    // to update the older block key with new key generator
+    blockExecutionInfo.setKeyStructureInfo(queryProperties.keyStructureInfo);
+    // setting the size of fixed key column (dictionary column)
+    blockExecutionInfo.setFixedLengthKeySize(getKeySize(updatedQueryDimension, segmentProperties));
+    Set<Integer> dictionaryColumnBlockIndex = new HashSet<Integer>();
+    List<Integer> noDictionaryColumnBlockIndex = new ArrayList<Integer>();
+    // get the block index to be read from file for query dimension
+    // for both dictionary columns and no dictionary columns
+    QueryUtil.fillQueryDimensionsBlockIndexes(updatedQueryDimension,
+        segmentProperties.getDimensionOrdinalToBlockMapping(), dictionaryColumnBlockIndex,
+        noDictionaryColumnBlockIndex);
+    int[] queryDictionaryColumnBlockIndexes = ArrayUtils.toPrimitive(
+        dictionaryColumnBlockIndex.toArray(new Integer[dictionaryColumnBlockIndex.size()]));
+    // need to sort the dictionary column block indexes, as for all dimension
+    // columns the key will be filled based on key order
+    Arrays.sort(queryDictionaryColumnBlockIndexes);
+    blockExecutionInfo.setDictionaryColumnBlockIndex(queryDictionaryColumnBlockIndexes);
+    // setting the no dictionary column block indexes
+    blockExecutionInfo.setNoDictionaryBlockIndexes(ArrayUtils.toPrimitive(
+        noDictionaryColumnBlockIndex.toArray(new Integer[noDictionaryColumnBlockIndex.size()])));
+    // setting column id to dictionary mapping
+    blockExecutionInfo.setColumnIdToDcitionaryMapping(queryProperties.columnToDictionayMapping);
+    // setting each column value size
+    blockExecutionInfo.setEachColumnValueSize(segmentProperties.getEachDimColumnValueSize());
+    blockExecutionInfo.setComplexColumnParentBlockIndexes(
+        getComplexDimensionParentBlockIndexes(updatedQueryDimension));
+    try {
+      // set the column group and its key structure info, which will be
+      // used for getting the column group column data in case of final row
+      // and in case of dimension aggregation
+      blockExecutionInfo.setColumnGroupToKeyStructureInfo(
+          QueryUtil.getColumnGroupKeyStructureInfo(updatedQueryDimension, segmentProperties));
+    } catch (KeyGenException e) {
+      throw new QueryExecutionException(e);
+    }
+    return blockExecutionInfo;
+  }
+
+  /**
+   * This method will be used to get the fixed key length size; this will
+   * be used to create a row from the column chunk
+   *
+   * @param queryDimension    query dimension
+   * @param blockMetadataInfo block metadata info
+   * @return key size
+   */
+  private int getKeySize(List<QueryDimension> queryDimension, SegmentProperties blockMetadataInfo) {
+    List<Integer> fixedLengthDimensionOrdinal =
+        new ArrayList<Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    int counter = 0;
+    while (counter < queryDimension.size()) {
+      if (queryDimension.get(counter).getDimension().numberOfChild() > 0) {
+        counter += queryDimension.get(counter).getDimension().numberOfChild();
+        continue;
+      } else if (!CarbonUtil.hasEncoding(queryDimension.get(counter).getDimension().getEncoder(),
+          Encoding.DICTIONARY)) {
+        counter++;
+      } else {
+        fixedLengthDimensionOrdinal.add(queryDimension.get(counter).getDimension().getKeyOrdinal());
+        counter++;
+      }
+    }
+    int[] dictionaryColumnOrdinal = ArrayUtils.toPrimitive(
+        fixedLengthDimensionOrdinal.toArray(new Integer[fixedLengthDimensionOrdinal.size()]));
+    if (dictionaryColumnOrdinal.length > 0) {
+      return blockMetadataInfo.getFixedLengthKeySplitter()
+          .getKeySizeByBlock(dictionaryColumnOrdinal);
+    }
+    return 0;
+  }
+
+  /**
+   * Below method will be used to get the aggregator info for the query
+   *
+   * @param queryModel query model
+   * @param tableBlock table block
+   * @return aggregator info
+   */
+  private AggregatorInfo getAggregatorInfoForBlock(QueryModel queryModel,
+      AbstractIndex tableBlock) {
+    // getting the aggregate infos which will be used during aggregation
+    AggregatorInfo aggregatorInfos = RestructureUtil
+        .getAggregatorInfos(queryModel.getQueryMeasures(),
+            tableBlock.getSegmentProperties().getMeasures());
+    // setting the index of expression in measure aggregators
+    aggregatorInfos.setExpressionAggregatorStartIndex(queryProperties.aggExpressionStartIndex);
+    // setting the index of measure columns in measure aggregators
+    aggregatorInfos.setMeasureAggregatorStartIndex(queryProperties.measureStartIndex);
+    // setting the measure aggregator for all aggregation function selected
+    // in query
+    aggregatorInfos.setMeasureDataTypes(queryProperties.measureDataTypes);
+    return aggregatorInfos;
+  }
+
+  private int[] getComplexDimensionParentBlockIndexes(List<QueryDimension> queryDimensions) {
+    List<Integer> parentBlockIndexList = new ArrayList<Integer>();
+    for (QueryDimension queryDimension : queryDimensions) {
+      if (CarbonUtil.hasDataType(queryDimension.getDimension().getDataType(),
+          new DataType[] { DataType.ARRAY, DataType.STRUCT, DataType.MAP })) {
+        parentBlockIndexList.add(queryDimension.getDimension().getOrdinal());
+      }
+    }
+    return ArrayUtils
+        .toPrimitive(parentBlockIndexList.toArray(new Integer[parentBlockIndexList.size()]));
+  }
+
+}
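
For reference, the List<Integer> to int[] conversion used twice above comes from commons-lang3. A minimal standalone sketch of just that idiom (names here are illustrative, not from the patch):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.commons.lang3.ArrayUtils;

    public class ToPrimitiveSketch {
      public static void main(String[] args) {
        List<Integer> ordinals = Arrays.asList(0, 3, 5);
        // toArray copies into an Integer[], toPrimitive unboxes it into an int[]
        int[] primitive = ArrayUtils.toPrimitive(ordinals.toArray(new Integer[ordinals.size()]));
        System.out.println(Arrays.toString(primitive)); // prints [0, 3, 5]
      }
    }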

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/executor/impl/DetailQueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/executor/impl/DetailQueryExecutor.java b/core/src/main/java/org/apache/carbondata/scan/executor/impl/DetailQueryExecutor.java
new file mode 100644
index 0000000..716cdc7
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/executor/impl/DetailQueryExecutor.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.executor.impl;
+
+import java.util.List;
+
+import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.model.QueryModel;
+import org.apache.carbondata.scan.result.iterator.DetailQueryResultIterator;
+
+/**
+ * Below class will be used to execute the detail query.
+ * For executing the detail query it will pass all the block execution
+ * info to the detail query result iterator, and that iterator will be returned
+ */
+public class DetailQueryExecutor extends AbstractQueryExecutor {
+
+  @Override public CarbonIterator<Object[]> execute(QueryModel queryModel)
+      throws QueryExecutionException {
+    List<BlockExecutionInfo> blockExecutionInfoList = getBlockExecutionInfos(queryModel);
+    return new DetailQueryResultIterator(blockExecutionInfoList, queryModel);
+  }
+
+}
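
For context, a caller obtains the iterator from this executor and drains it; a minimal usage sketch, assuming a fully populated QueryModel (its construction is not shown in this patch):

    // hypothetical usage; queryModel must be prepared elsewhere
    DetailQueryExecutor executor = new DetailQueryExecutor();
    CarbonIterator<Object[]> rows = executor.execute(queryModel);
    while (rows.hasNext()) {
      Object[] row = rows.next();
      // each Object[] holds the dimension and measure values of one result row
    }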


[22/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/block/SegmentProperties.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/block/SegmentProperties.java b/core/src/main/java/org/carbondata/core/carbon/datastore/block/SegmentProperties.java
deleted file mode 100644
index df99b4b..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/block/SegmentProperties.java
+++ /dev/null
@@ -1,748 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.block;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
-import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.columnar.ColumnGroupModel;
-import org.carbondata.core.keygenerator.KeyGenerator;
-import org.carbondata.core.keygenerator.columnar.ColumnarSplitter;
-import org.carbondata.core.keygenerator.columnar.impl.MultiDimKeyVarLengthVariableSplitGenerator;
-import org.carbondata.core.keygenerator.mdkey.MultiDimKeyVarLengthGenerator;
-import org.carbondata.core.util.CarbonUtil;
-
-import org.apache.commons.lang3.ArrayUtils;
-
-/**
- * This class contains all the details about the restructuring information of
- * the block. This will be used during query execution to handle restructure
- * information
- */
-public class SegmentProperties {
-
-  /**
-   * key generator of the block which was used to generate the mdkey for
-   * normal dimensions. this will be required during query execution
-   */
-  private KeyGenerator dimensionKeyGenerator;
-
-  /**
-   * list of dimensions present in the block
-   */
-  private List<CarbonDimension> dimensions;
-
-  /**
-   * list of complex dimensions present in the block
-   */
-  private List<CarbonDimension> complexDimensions;
-
-  /**
-   * list of measure present in the block
-   */
-  private List<CarbonMeasure> measures;
-
-  /**
-   * cardinality of dimension columns participated in key generator
-   */
-  private int[] dimColumnsCardinality;
-
-  /**
-   * cardinality of complex dimension
-   */
-  private int[] complexDimColumnCardinality;
-
-  /**
-   * mapping of dimension column to block in a file; this will be used for
-   * reading the blocks from the file
-   */
-  private Map<Integer, Integer> dimensionOrdinalToBlockMapping;
-
-  /**
-   * a block can have multiple columns. This will have block index as key
-   * and all dimensions participating in that block as values
-   */
-  private Map<Integer, Set<Integer>> blockTodimensionOrdinalMapping;
-
-  /**
-   * mapping of measure column to block in a file; this will be used while
-   * reading the block from the file
-   */
-  private Map<Integer, Integer> measuresOrdinalToBlockMapping;
-
-  /**
-   * size of each dimension column value in a block; this can be used when
-   * we need to copy a cell value to create a tuple. for a no dictionary
-   * column this value will be -1, for a dictionary column the size of the
-   * value will be fixed.
-   */
-  private int[] eachDimColumnValueSize;
-
-  /**
-   * size of each complex dimension column value in a block; this can be used
-   * when we need to copy a cell value to create a tuple. for a no dictionary
-   * column this value will be -1, for a dictionary column the size of the
-   * value will be fixed.
-   */
-  private int[] eachComplexDimColumnValueSize;
-
-  /**
-   * below mapping will have mapping of the column group to dimension ordinals;
-   * for example if 3 dimensions are present in column group id 0 and their
-   * ordinals are 2,3,4 then the map will contain 0 -> {2,3,4}
-   */
-  private Map<Integer, KeyGenerator> columnGroupAndItsKeygenartor;
-
-  /**
-   * in a column group key generator the dimension index will not be same as the
-   * dimension ordinal. This will have mapping of ordinal to key generator or mdkey index
-   */
-  private Map<Integer, Map<Integer, Integer>> columnGroupOrdinalToMdkeymapping;
-
-  /**
-   * this will be used to split the fixed length key;
-   * this will have all the information about how the key was created
-   * and how to split the key based on group
-   */
-  private ColumnarSplitter fixedLengthKeySplitter;
-
-  /**
-   * to store the number of no dictionary dimensions;
-   * this will be used during query execution for creating
-   * start and end keys. Purpose of storing this value here is
-   * that during query execution there is no need to calculate it every time
-   */
-  private int numberOfNoDictionaryDimension;
-
-  /**
-   * column group model
-   */
-  private ColumnGroupModel colGroupModel;
-
-  public SegmentProperties(List<ColumnSchema> columnsInTable, int[] columnCardinality) {
-    dimensions = new ArrayList<CarbonDimension>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    complexDimensions =
-        new ArrayList<CarbonDimension>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    measures = new ArrayList<CarbonMeasure>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    fillDimensionAndMeasureDetails(columnsInTable, columnCardinality);
-    dimensionOrdinalToBlockMapping =
-        new HashMap<Integer, Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    blockTodimensionOrdinalMapping =
-        new HashMap<Integer, Set<Integer>>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    measuresOrdinalToBlockMapping =
-        new HashMap<Integer, Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    intialiseColGroups(columnsInTable);
-    fillOrdinalToBlockMappingForDimension();
-    fillOrdinalToBlockIndexMappingForMeasureColumns();
-    fillColumnGroupAndItsCardinality(columnCardinality);
-    fillKeyGeneratorDetails();
-  }
-
-  /**
-   * it fills column groups
-   * e.g {{1},{2,3,4},{5},{6},{7,8,9}}
-   *
-   * @param columnsInTable
-   */
-  private void intialiseColGroups(List<ColumnSchema> columnsInTable) {
-    // StringBuffer columnGroups = new StringBuffer();
-    List<List<Integer>> colGrpList = new ArrayList<List<Integer>>();
-    List<Integer> group = new ArrayList<Integer>();
-    for (int i = 0; i < dimensions.size(); i++) {
-      CarbonDimension dimension = dimensions.get(i);
-      if (!dimension.hasEncoding(Encoding.DICTIONARY)) {
-        continue;
-      }
-      group.add(dimension.getOrdinal());
-      // columnGroups.append(dimension.getOrdinal());
-      if (i < dimensions.size() - 1) {
-        int currGroupOrdinal = dimension.columnGroupId();
-        int nextGroupOrdinal = dimensions.get(i + 1).columnGroupId();
-        if (!(currGroupOrdinal == nextGroupOrdinal && currGroupOrdinal != -1)) {
-          colGrpList.add(group);
-          group = new ArrayList<Integer>();
-        }
-      } else {
-        colGrpList.add(group);
-      }
-
-    }
-    int[][] colGroups = new int[colGrpList.size()][];
-    for (int i = 0; i < colGroups.length; i++) {
-      colGroups[i] = new int[colGrpList.get(i).size()];
-      for (int j = 0; j < colGroups[i].length; j++) {
-        colGroups[i][j] = colGrpList.get(i).get(j);
-      }
-    }
-    this.colGroupModel = CarbonUtil.getColGroupModel(colGroups);
-  }
-
-  /**
-   * below method is to fill the dimension and its mapping to file blocks;
-   * all the columns of a column group will point to the same block
-   */
-  private void fillOrdinalToBlockMappingForDimension() {
-    int blockOrdinal = -1;
-    CarbonDimension dimension = null;
-    int index = 0;
-    int prvcolumnGroupId = -1;
-    while (index < dimensions.size()) {
-      dimension = dimensions.get(index);
-      // if column group id is same as previous one then block index will be
-      // same
-      if (dimension.isColumnar() || dimension.columnGroupId() != prvcolumnGroupId) {
-        blockOrdinal++;
-      }
-      dimensionOrdinalToBlockMapping.put(dimension.getOrdinal(), blockOrdinal);
-      prvcolumnGroupId = dimension.columnGroupId();
-      index++;
-    }
-    index = 0;
-    // complex dimension will be stored at last
-    while (index < complexDimensions.size()) {
-      dimension = complexDimensions.get(index);
-      dimensionOrdinalToBlockMapping.put(dimension.getOrdinal(), ++blockOrdinal);
-      blockOrdinal = fillComplexDimensionChildBlockIndex(blockOrdinal, dimension);
-      index++;
-    }
-    fillBlockToDimensionOrdinalMapping();
-  }
-
-  /**
-   * fills the reverse mapping of block index to the dimension ordinals it contains
-   */
-  private void fillBlockToDimensionOrdinalMapping() {
-    Set<Entry<Integer, Integer>> blocks = dimensionOrdinalToBlockMapping.entrySet();
-    Iterator<Entry<Integer, Integer>> blockItr = blocks.iterator();
-    while (blockItr.hasNext()) {
-      Entry<Integer, Integer> block = blockItr.next();
-      Set<Integer> dimensionOrdinals = blockTodimensionOrdinalMapping.get(block.getValue());
-      if (dimensionOrdinals == null) {
-        dimensionOrdinals = new HashSet<Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-        blockTodimensionOrdinalMapping.put(block.getValue(), dimensionOrdinals);
-      }
-      dimensionOrdinals.add(block.getKey());
-    }
-  }
-
-  /**
-   * Below method will be used to add the complex dimension child
-   * block index. It is a recursive method which will get the children
-   * and add the block index
-   *
-   * @param blockOrdinal start block ordinal
-   * @param dimension    parent dimension
-   * @return last block index
-   */
-  private int fillComplexDimensionChildBlockIndex(int blockOrdinal, CarbonDimension dimension) {
-    for (int i = 0; i < dimension.numberOfChild(); i++) {
-      dimensionOrdinalToBlockMapping
-          .put(dimension.getListOfChildDimensions().get(i).getOrdinal(), ++blockOrdinal);
-      if (dimension.getListOfChildDimensions().get(i).numberOfChild() > 0) {
-        blockOrdinal = fillComplexDimensionChildBlockIndex(blockOrdinal,
-            dimension.getListOfChildDimensions().get(i));
-      }
-    }
-    return blockOrdinal;
-  }
-
-  /**
-   * Below method will be used to fill the mapping
-   * of measure ordinal to its block index in the
-   * file
-   */
-  private void fillOrdinalToBlockIndexMappingForMeasureColumns() {
-    int blockOrdinal = 0;
-    int index = 0;
-    while (index < measures.size()) {
-      measuresOrdinalToBlockMapping.put(measures.get(index).getOrdinal(), blockOrdinal);
-      blockOrdinal++;
-      index++;
-    }
-  }
-
-  /**
-   * below method will fill dimension and measure details of the block.
-   *
-   * @param columnsInTable
-   * @param columnCardinality
-   */
-  private void fillDimensionAndMeasureDetails(List<ColumnSchema> columnsInTable,
-      int[] columnCardinality) {
-    ColumnSchema columnSchema = null;
-    // ordinal will be required to read the data from file block
-    int dimensonOrdinal = 0;
-    int measureOrdinal = -1;
-    // table ordinal is actually a schema ordinal; this is required as the
-    // cardinality array
-    // which is stored in segment info contains -1 if that particular column
-    // is a no dictionary column
-    int tableOrdinal = -1;
-    // creating a list as we do not know how many dimensions did not participate
-    // in the mdkey
-    List<Integer> cardinalityIndexForNormalDimensionColumn =
-        new ArrayList<Integer>(columnsInTable.size());
-    // creating a list as we do not know how many dimensions did not participate
-    // in the mdkey
-    List<Integer> cardinalityIndexForComplexDimensionColumn =
-        new ArrayList<Integer>(columnsInTable.size());
-    boolean isComplexDimensionStarted = false;
-    CarbonDimension carbonDimension = null;
-    // to store the position of dimension in surrogate key array which is
-    // participating in mdkey
-    int keyOrdinal = 0;
-    int previousColumnGroup = -1;
-    // to store the ordinal of the column group ordinal
-    int columnGroupOrdinal = 0;
-    int counter = 0;
-    int complexTypeOrdinal = 0;
-    while (counter < columnsInTable.size()) {
-      columnSchema = columnsInTable.get(counter);
-      if (columnSchema.isDimensionColumn()) {
-        tableOrdinal++;
-        // not adding the cardinality of the non dictionary
-        // column as it is not part of the mdkey
-        if (CarbonUtil.hasEncoding(columnSchema.getEncodingList(), Encoding.DICTIONARY)
-            && !isComplexDimensionStarted && columnSchema.getNumberOfChild() == 0) {
-          cardinalityIndexForNormalDimensionColumn.add(tableOrdinal);
-          if (columnSchema.isColumnar()) {
-            // if it is a columnar dimension participating in mdkey then add
-            // key ordinal and dimension ordinal
-            carbonDimension =
-                new CarbonDimension(columnSchema, dimensonOrdinal++, keyOrdinal++, -1, -1);
-          } else {
-            // if not columnar then it is a column group dimension
-
-            // below code to handle first dimension of the column group
-            // in this case ordinal of the column group will be 0
-            if (previousColumnGroup != columnSchema.getColumnGroupId()) {
-              columnGroupOrdinal = 0;
-              carbonDimension = new CarbonDimension(columnSchema, dimensonOrdinal++, keyOrdinal++,
-                  columnGroupOrdinal++, -1);
-            }
-            // if previous dimension's column group id is same as current then
-            // it belongs to the same row group
-            else {
-              carbonDimension = new CarbonDimension(columnSchema, dimensonOrdinal++, keyOrdinal++,
-                  columnGroupOrdinal++, -1);
-            }
-            previousColumnGroup = columnSchema.getColumnGroupId();
-          }
-        }
-        // as complex types will be stored at last, once a complex type has started all the
-        // dimensions will be added to complex type
-        else if (isComplexDimensionStarted || CarbonUtil.hasDataType(columnSchema.getDataType(),
-            new DataType[] { DataType.ARRAY, DataType.STRUCT })) {
-          cardinalityIndexForComplexDimensionColumn.add(tableOrdinal);
-          carbonDimension =
-              new CarbonDimension(columnSchema, dimensonOrdinal++, -1, -1, complexTypeOrdinal++);
-          carbonDimension.initializeChildDimensionsList(columnSchema.getNumberOfChild());
-          complexDimensions.add(carbonDimension);
-          isComplexDimensionStarted = true;
-          int previouseOrdinal = dimensonOrdinal;
-          dimensonOrdinal =
-              readAllComplexTypeChildrens(dimensonOrdinal, columnSchema.getNumberOfChild(),
-                  columnsInTable, carbonDimension, complexTypeOrdinal);
-          int numberOfChildrenDimensionAdded = dimensonOrdinal - previouseOrdinal;
-          for (int i = 0; i < numberOfChildrenDimensionAdded; i++) {
-            cardinalityIndexForComplexDimensionColumn.add(++tableOrdinal);
-          }
-          counter = dimensonOrdinal;
-          complexTypeOrdinal = carbonDimension.getListOfChildDimensions()
-              .get(carbonDimension.getListOfChildDimensions().size() - 1).getComplexTypeOrdinal();
-          complexTypeOrdinal++;
-          continue;
-        } else {
-          // for no dictionary dimension
-          carbonDimension = new CarbonDimension(columnSchema, dimensonOrdinal++, -1, -1, -1);
-          numberOfNoDictionaryDimension++;
-        }
-        dimensions.add(carbonDimension);
-      } else {
-        measures.add(new CarbonMeasure(columnSchema, ++measureOrdinal));
-      }
-      counter++;
-    }
-    dimColumnsCardinality = new int[cardinalityIndexForNormalDimensionColumn.size()];
-    complexDimColumnCardinality = new int[cardinalityIndexForComplexDimensionColumn.size()];
-    int index = 0;
-    // filling the cardinality of the dimension column to create the key
-    // generator
-    for (Integer cardinalityArrayIndex : cardinalityIndexForNormalDimensionColumn) {
-      dimColumnsCardinality[index++] = columnCardinality[cardinalityArrayIndex];
-    }
-    index = 0;
-    // filling the cardinality of the complex dimension column to create the
-    // key generator
-    for (Integer cardinalityArrayIndex : cardinalityIndexForComplexDimensionColumn) {
-      complexDimColumnCardinality[index++] = columnCardinality[cardinalityArrayIndex];
-    }
-  }
-
-  /**
-   * Read all primitive/complex children and set them as the list of child carbon dimensions
-   * of the parent dimension
-   *
-   * @param dimensionOrdinal
-   * @param childCount
-   * @param listOfColumns
-   * @param parentDimension
-   * @return
-   */
-  private int readAllComplexTypeChildrens(int dimensionOrdinal, int childCount,
-      List<ColumnSchema> listOfColumns, CarbonDimension parentDimension,
-      int complexDimensionOrdianl) {
-    for (int i = 0; i < childCount; i++) {
-      ColumnSchema columnSchema = listOfColumns.get(dimensionOrdinal);
-      if (columnSchema.isDimensionColumn()) {
-        if (columnSchema.getNumberOfChild() > 0) {
-          CarbonDimension complexDimension =
-              new CarbonDimension(columnSchema, dimensionOrdinal++, -1, -1,
-                  complexDimensionOrdianl++);
-          complexDimension.initializeChildDimensionsList(columnSchema.getNumberOfChild());
-          parentDimension.getListOfChildDimensions().add(complexDimension);
-          dimensionOrdinal =
-              readAllComplexTypeChildrens(dimensionOrdinal, columnSchema.getNumberOfChild(),
-                  listOfColumns, complexDimension, complexDimensionOrdianl);
-        } else {
-          parentDimension.getListOfChildDimensions().add(
-              new CarbonDimension(columnSchema, dimensionOrdinal++, -1, -1,
-                  complexDimensionOrdianl++));
-        }
-      }
-    }
-    return dimensionOrdinal;
-  }
-
-  /**
-   * Below method will fill the key generator details of both the types of key
-   * generator. This will be required during both query execution and data
-   * loading.
-   */
-  private void fillKeyGeneratorDetails() {
-    // create a dimension partitioner list
-    // this list will contain information about how dimension values are
-    // stored
-    // whether in a group or individually
-    List<Integer> dimensionPartitionList =
-        new ArrayList<Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    List<Boolean> isDictionaryColumn =
-        new ArrayList<Boolean>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    int prvcolumnGroupId = -1;
-    int counter = 0;
-    while (counter < dimensions.size()) {
-      CarbonDimension carbonDimension = dimensions.get(counter);
-      // if dimension is not a part of mdkey then no need to add
-      if (!carbonDimension.getEncoder().contains(Encoding.DICTIONARY)) {
-        isDictionaryColumn.add(false);
-        counter++;
-        continue;
-      }
-      // columnar column is stored individually
-      // so add one
-      if (carbonDimension.isColumnar()) {
-        dimensionPartitionList.add(1);
-        isDictionaryColumn.add(true);
-      }
-      // if in a group then need to add how many columns are selected in the
-      // group
-      if (!carbonDimension.isColumnar() && carbonDimension.columnGroupId() == prvcolumnGroupId) {
-        // incrementing the previous value of the list as it is in same column group
-        dimensionPartitionList.set(dimensionPartitionList.size() - 1,
-            dimensionPartitionList.get(dimensionPartitionList.size() - 1) + 1);
-      } else if (!carbonDimension.isColumnar()) {
-        dimensionPartitionList.add(1);
-        isDictionaryColumn.add(true);
-      }
-      prvcolumnGroupId = carbonDimension.columnGroupId();
-      counter++;
-    }
-    // get the partitioner
-    int[] dimensionPartitions = ArrayUtils
-        .toPrimitive(dimensionPartitionList.toArray(new Integer[dimensionPartitionList.size()]));
-    // get the bit length of each column
-    int[] bitLength = CarbonUtil.getDimensionBitLength(dimColumnsCardinality, dimensionPartitions);
-    // create a key generator
-    this.dimensionKeyGenerator = new MultiDimKeyVarLengthGenerator(bitLength);
-    this.fixedLengthKeySplitter =
-        new MultiDimKeyVarLengthVariableSplitGenerator(bitLength, dimensionPartitions);
-    // get the size of each value in file block
-    int[] dictionayDimColumnValueSize = fixedLengthKeySplitter.getBlockKeySize();
-    int index = -1;
-    this.eachDimColumnValueSize = new int[isDictionaryColumn.size()];
-    for (int i = 0; i < eachDimColumnValueSize.length; i++) {
-      if (!isDictionaryColumn.get(i)) {
-        eachDimColumnValueSize[i] = -1;
-        continue;
-      }
-      eachDimColumnValueSize[i] = dictionayDimColumnValueSize[++index];
-    }
-    if (complexDimensions.size() > 0) {
-      int[] complexDimesionParition = new int[complexDimColumnCardinality.length];
-      // as complex dimension will be stored in column format add one
-      Arrays.fill(complexDimesionParition, 1);
-      bitLength =
-          CarbonUtil.getDimensionBitLength(complexDimColumnCardinality, complexDimesionParition);
-      for (int i = 0; i < bitLength.length; i++) {
-        if (complexDimColumnCardinality[i] == 0) {
-          bitLength[i] = 64;
-        }
-      }
-      ColumnarSplitter keySplitter =
-          new MultiDimKeyVarLengthVariableSplitGenerator(bitLength, complexDimesionParition);
-      eachComplexDimColumnValueSize = keySplitter.getBlockKeySize();
-    } else {
-      eachComplexDimColumnValueSize = new int[0];
-    }
-  }
-
-  /**
-   * Below method will be used to create a mapping of column group and its column cardinality.
-   * This mapping will have column group id to cardinality of the dimensions present in
-   * the column group. This mapping will be used during query execution to create
-   * a mask key for the column group dimensions, which will be used in aggregation
-   * and filter queries as column group dimensions are stored at the bit level
-   */
-  private void fillColumnGroupAndItsCardinality(int[] cardinality) {
-    // mapping of the column group and its ordinal
-    Map<Integer, List<Integer>> columnGroupAndOrdinalMapping =
-        new HashMap<Integer, List<Integer>>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    // to store a column group
-    List<Integer> currentColumnGroup = null;
-    // current index
-    int index = 0;
-    // previous column group to check whether all the columns of a column group have been selected
-    int prvColumnGroupId = -1;
-    while (index < dimensions.size()) {
-      // if the dimension is not columnar and its column group id is same as the
-      // previous column's
-      // then we need to add the ordinal of that column as it belongs to the same
-      // column group
-      if (!dimensions.get(index).isColumnar()
-          && dimensions.get(index).columnGroupId() == prvColumnGroupId
-          && null != currentColumnGroup) {
-        currentColumnGroup.add(index);
-      }
-      // if the column is not columnar then a new column group has come,
-      // so we need to create a list for the new column group and add the
-      // ordinal
-      else if (!dimensions.get(index).isColumnar()) {
-        currentColumnGroup = new ArrayList<Integer>();
-        columnGroupAndOrdinalMapping.put(dimensions.get(index).columnGroupId(), currentColumnGroup);
-        currentColumnGroup.add(index);
-      }
-      // update the column group id every time; this is required to group the
-      // columns
-      // of the same column group
-      prvColumnGroupId = dimensions.get(index).columnGroupId();
-      index++;
-    }
-    // Initializing the map
-    this.columnGroupAndItsKeygenartor =
-        new HashMap<Integer, KeyGenerator>(columnGroupAndOrdinalMapping.size());
-    this.columnGroupOrdinalToMdkeymapping = new HashMap<>(columnGroupAndOrdinalMapping.size());
-    int[] columnGroupCardinality = null;
-    index = 0;
-    Iterator<Entry<Integer, List<Integer>>> iterator =
-        columnGroupAndOrdinalMapping.entrySet().iterator();
-    while (iterator.hasNext()) {
-      Entry<Integer, List<Integer>> next = iterator.next();
-      List<Integer> currentGroupOrdinal = next.getValue();
-      Map<Integer, Integer> colGrpOrdinalMdkeyMapping = new HashMap<>(currentGroupOrdinal.size());
-      // create the cardinality array
-      columnGroupCardinality = new int[currentGroupOrdinal.size()];
-      for (int i = 0; i < columnGroupCardinality.length; i++) {
-        // fill the cardinality
-        columnGroupCardinality[i] = cardinality[currentGroupOrdinal.get(i)];
-        colGrpOrdinalMdkeyMapping.put(currentGroupOrdinal.get(i), i);
-      }
-      this.columnGroupAndItsKeygenartor.put(next.getKey(), new MultiDimKeyVarLengthGenerator(
-          CarbonUtil.getDimensionBitLength(columnGroupCardinality,
-              new int[] { columnGroupCardinality.length })));
-      this.columnGroupOrdinalToMdkeymapping.put(next.getKey(), colGrpOrdinalMdkeyMapping);
-    }
-  }
-
-  /**
-   * Below method is to get the value size of each dimension column. As this method
-   * will be used only once we can merge both the dimension and complex
-   * dimension arrays. Complex dimensions are stored at last, so first copy
-   * the normal dimension sizes and then copy the complex dimension sizes. If we store
-   * this value as a class variable we will unnecessarily waste some space
-   *
-   * @return each dimension value size
-   */
-  public int[] getDimensionColumnsValueSize() {
-    int[] dimensionValueSize =
-        new int[eachDimColumnValueSize.length + eachComplexDimColumnValueSize.length];
-    System
-        .arraycopy(eachDimColumnValueSize, 0, dimensionValueSize, 0, eachDimColumnValueSize.length);
-    System.arraycopy(eachComplexDimColumnValueSize, 0, dimensionValueSize,
-        eachDimColumnValueSize.length, eachComplexDimColumnValueSize.length);
-    return dimensionValueSize;
-  }
-
-  /**
-   * @return the dimensionKeyGenerator
-   */
-  public KeyGenerator getDimensionKeyGenerator() {
-    return dimensionKeyGenerator;
-  }
-
-  /**
-   * @return the dimensions
-   */
-  public List<CarbonDimension> getDimensions() {
-    return dimensions;
-  }
-
-  /**
-   * @return the complexDimensions
-   */
-  public List<CarbonDimension> getComplexDimensions() {
-    return complexDimensions;
-  }
-
-  /**
-   * @return the measures
-   */
-  public List<CarbonMeasure> getMeasures() {
-    return measures;
-  }
-
-  /**
-   * @return the dimColumnsCardinality
-   */
-  public int[] getDimColumnsCardinality() {
-    return dimColumnsCardinality;
-  }
-
-  /**
-   * @return the complexDimColumnCardinality
-   */
-  public int[] getComplexDimColumnCardinality() {
-    return complexDimColumnCardinality;
-  }
-
-  /**
-   * @return the dimensionOrdinalToBlockMapping
-   */
-  public Map<Integer, Integer> getDimensionOrdinalToBlockMapping() {
-    return dimensionOrdinalToBlockMapping;
-  }
-
-  /**
-   * @return the measuresOrdinalToBlockMapping
-   */
-  public Map<Integer, Integer> getMeasuresOrdinalToBlockMapping() {
-    return measuresOrdinalToBlockMapping;
-  }
-
-  /**
-   * @return the eachDimColumnValueSize
-   */
-  public int[] getEachDimColumnValueSize() {
-    return eachDimColumnValueSize;
-  }
-
-  /**
-   * @return the eachComplexDimColumnValueSize
-   */
-  public int[] getEachComplexDimColumnValueSize() {
-    return eachComplexDimColumnValueSize;
-  }
-
-  /**
-   * @return the fixedLengthKeySplitter
-   */
-  public ColumnarSplitter getFixedLengthKeySplitter() {
-    return fixedLengthKeySplitter;
-  }
-
-  /**
-   * @return the columnGroupAndItsKeygenartor
-   */
-  public Map<Integer, KeyGenerator> getColumnGroupAndItsKeygenartor() {
-    return columnGroupAndItsKeygenartor;
-  }
-
-  /**
-   * @return the numberOfNoDictionaryDimension
-   */
-  public int getNumberOfNoDictionaryDimension() {
-    return numberOfNoDictionaryDimension;
-  }
-
-  /**
-   * @return the column groups
-   */
-  public int[][] getColumnGroups() {
-    return colGroupModel.getColumnGroup();
-  }
-
-  /**
-   * @return colGroupModel
-   */
-  public ColumnGroupModel getColumnGroupModel() {
-    return this.colGroupModel;
-  }
-
-  /**
-   * get mdkey ordinal for given dimension ordinal of given column group
-   *
-   * @param colGrpId
-   * @param ordinal
-   * @return mdkey ordinal
-   */
-  public int getColumnGroupMdKeyOrdinal(int colGrpId, int ordinal) {
-    return columnGroupOrdinalToMdkeymapping.get(colGrpId).get(ordinal);
-  }
-
-  /**
-   * It returns the number of columns available in the given column group
-   *
-   * @param colGrpId
-   * @return no of column in given column group
-   */
-  public int getNoOfColumnsInColumnGroup(int colGrpId) {
-    return columnGroupOrdinalToMdkeymapping.get(colGrpId).size();
-  }
-
-  /**
-   * @param blockIndex
-   * @return all dimensions present in the given block index
-   */
-  public Set<Integer> getDimensionOrdinalForBlock(int blockIndex) {
-    return blockTodimensionOrdinalMapping.get(blockIndex);
-  }
-
-}
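
The dimension partitioning logic in fillKeyGeneratorDetails above is easiest to see on toy data. Below is a self-contained sketch of just that loop, using plain {isColumnar, columnGroupId} pairs instead of CarbonDimension objects (all names hypothetical):

    import java.util.ArrayList;
    import java.util.List;

    public class PartitionSketch {
      public static void main(String[] args) {
        // {isColumnar(1/0), columnGroupId}: a columnar dim, a two-dim group, a columnar dim
        int[][] dims = { {1, -1}, {0, 1}, {0, 1}, {1, -1} };
        List<Integer> partitions = new ArrayList<Integer>();
        int prevGroup = -1;
        for (int[] d : dims) {
          boolean columnar = d[0] == 1;
          if (columnar || d[1] != prevGroup) {
            partitions.add(1); // a new block starts
          } else {
            // same column group as the previous dimension: grow the last block
            partitions.set(partitions.size() - 1, partitions.get(partitions.size() - 1) + 1);
          }
          prevGroup = d[1];
        }
        System.out.println(partitions); // prints [1, 2, 1]
      }
    }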

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/block/SegmentTaskIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/block/SegmentTaskIndex.java b/core/src/main/java/org/carbondata/core/carbon/datastore/block/SegmentTaskIndex.java
deleted file mode 100644
index 7e415ac..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/block/SegmentTaskIndex.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.block;
-
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.BTreeBuilderInfo;
-import org.carbondata.core.carbon.datastore.BtreeBuilder;
-import org.carbondata.core.carbon.datastore.impl.btree.BlockBTreeBuilder;
-import org.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
-
-/**
- * Class which is responsible for loading the b+ tree block. This class will
- * persist all the details of a table segment
- */
-public class SegmentTaskIndex extends AbstractIndex {
-
-  /**
-   * Below method will store the blocks in a B+ tree data structure
-   *
-   * @param footerList data file footers of the blocks
-   */
-  public void buildIndex(List<DataFileFooter> footerList) {
-    // create a metadata details
-    // this will be useful in query handling
-    // all the data file metadata will have common segment properties, so we
-    // can use the first one to create the segment properties
-    segmentProperties = new SegmentProperties(footerList.get(0).getColumnInTable(),
-        footerList.get(0).getSegmentInfo().getColumnCardinality());
-    // create a segment builder info
-    // in case of segment creation we do not need any file path or each column value size
-    // as the Btree will be built as per min/max and start key
-    BTreeBuilderInfo btreeBuilderInfo = new BTreeBuilderInfo(footerList, null);
-    BtreeBuilder blocksBuilder = new BlockBTreeBuilder();
-    // load the metadata
-    blocksBuilder.build(btreeBuilderInfo);
-    dataRefNode = blocksBuilder.get();
-    for (DataFileFooter footer : footerList) {
-      totalNumberOfRows += footer.getNumberOfRows();
-    }
-  }
-}
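
Usage of this index is a two-step affair; a short sketch, assuming footerList was already read from the segment's data files (not shown here):

    // hypothetical: footerList is a List<DataFileFooter> gathered during driver-side pruning
    SegmentTaskIndex taskIndex = new SegmentTaskIndex();
    taskIndex.buildIndex(footerList); // builds the block BTree and accumulates the row count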

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/block/TableBlockInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/block/TableBlockInfo.java b/core/src/main/java/org/carbondata/core/carbon/datastore/block/TableBlockInfo.java
deleted file mode 100644
index cd0e831..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/block/TableBlockInfo.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.block;
-
-import java.io.Serializable;
-import java.util.Arrays;
-
-import org.carbondata.core.carbon.path.CarbonTablePath;
-import org.carbondata.core.carbon.path.CarbonTablePath.DataFileUtil;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-
-/**
- * class will be used to pass the block detail; the detail will be passed from driver
- * to all the executors to load the b+ tree
- */
-public class TableBlockInfo extends Distributable
-    implements Serializable, Comparable<Distributable> {
-
-  /**
-   * serialization id
-   */
-  private static final long serialVersionUID = -6502868998599821172L;
-
-  /**
-   * full qualified file path of the block
-   */
-  private String filePath;
-
-  /**
-   * block offset in the file
-   */
-  private long blockOffset;
-
-  /**
-   * length of the block
-   */
-  private long blockLength;
-
-  /**
-   * id of the segment; this will be used to sort the blocks
-   */
-  private String segmentId;
-
-  private String[] locations;
-
-
-  public TableBlockInfo(String filePath, long blockOffset, String segmentId, String[] locations,
-      long blockLength) {
-    this.filePath = FileFactory.getUpdatedFilePath(filePath);
-    this.blockOffset = blockOffset;
-    this.segmentId = segmentId;
-    this.locations = locations;
-    this.blockLength = blockLength;
-  }
-
-  /**
-   * @return the filePath
-   */
-  public String getFilePath() {
-    return filePath;
-  }
-
-  /**
-   * @return the blockOffset
-   */
-  public long getBlockOffset() {
-    return blockOffset;
-  }
-
-
-  /**
-   * @return the segmentId
-   */
-  public String getSegmentId() {
-    return segmentId;
-  }
-
-  /**
-   * @return the blockLength
-   */
-  public long getBlockLength() {
-    return blockLength;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see java.lang.Object#equals(java.lang.Object)
-   */
-  @Override public boolean equals(Object obj) {
-    if (this == obj) {
-      return true;
-    }
-    if (obj == null) {
-      return false;
-    }
-    if (!(obj instanceof TableBlockInfo)) {
-      return false;
-    }
-    TableBlockInfo other = (TableBlockInfo) obj;
-    if (!segmentId.equals(other.segmentId)) {
-      return false;
-    }
-    if (blockOffset != other.blockOffset) {
-      return false;
-    }
-    if (blockLength != other.blockLength) {
-      return false;
-    }
-
-    if (filePath == null) {
-      if (other.filePath != null) {
-        return false;
-      }
-    } else if (!filePath.equals(other.filePath)) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * Below method will be used to compare two TableBlockInfo objects; this will be
-   * used for sorting. Comparison logic is: 1. compare segment ids; if segment ids
-   * are same 2. compare task ids; if task ids are same 3. compare offsets of the
-   * blocks
-   */
-  @Override public int compareTo(Distributable other) {
-
-    int compareResult = 0;
-    // get the segment id
-    // convert seg ID to double.
-
-    double seg1 = Double.parseDouble(segmentId);
-    double seg2 = Double.parseDouble(((TableBlockInfo) other).segmentId);
-    if (seg1 - seg2 < 0) {
-      return -1;
-    }
-    if (seg1 - seg2 > 0) {
-      return 1;
-    }
-
-    // Comparing the task id of this file to the other;
-    // if both the task ids of the files are same then we need to compare the
-    // offset of
-    // the file
-    if (CarbonTablePath.isCarbonDataFile(filePath)) {
-      int firstTaskId = Integer.parseInt(DataFileUtil.getTaskNo(filePath));
-      int otherTaskId = Integer.parseInt(DataFileUtil.getTaskNo(((TableBlockInfo) other).filePath));
-      if (firstTaskId != otherTaskId) {
-        return firstTaskId - otherTaskId;
-      }
-      // compare the part no of both block info
-      int firstPartNo = Integer.parseInt(DataFileUtil.getPartNo(filePath));
-      int SecondPartNo =
-          Integer.parseInt(DataFileUtil.getPartNo(((TableBlockInfo) other).filePath));
-      compareResult = firstPartNo - SecondPartNo;
-    } else {
-      compareResult = filePath.compareTo(((TableBlockInfo) other).getFilePath());
-    }
-    if (compareResult != 0) {
-      return compareResult;
-    }
-    // compare result is 0 here, so part no is also same; compare the offset and length of the block
-    if (blockOffset + blockLength
-        < ((TableBlockInfo) other).blockOffset + ((TableBlockInfo) other).blockLength) {
-      return -1;
-    } else if (blockOffset + blockLength
-        > ((TableBlockInfo) other).blockOffset + ((TableBlockInfo) other).blockLength) {
-      return 1;
-    }
-    return 0;
-  }
-
-  @Override public int hashCode() {
-    int result = filePath.hashCode();
-    result = 31 * result + (int) (blockOffset ^ (blockOffset >>> 32));
-    result = 31 * result + (int) (blockLength ^ (blockLength >>> 32));
-    result = 31 * result + segmentId.hashCode();
-    result = 31 * result + Arrays.hashCode(locations);
-    return result;
-  }
-
-  @Override public String[] getLocations() {
-    return locations;
-  }
-
-}
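
Since TableBlockInfo is Comparable, block lists can be sorted directly; a sketch with hypothetical values (the paths here are not carbondata-named, so compareTo falls back to plain path comparison after the segment check):

    // hypothetical values; segment ids must parse as numbers
    java.util.List<TableBlockInfo> blocks = new java.util.ArrayList<TableBlockInfo>();
    blocks.add(new TableBlockInfo("/store/blk-b", 0L, "2", new String[] { "node1" }, 1024L));
    blocks.add(new TableBlockInfo("/store/blk-a", 0L, "1", new String[] { "node2" }, 1024L));
    java.util.Collections.sort(blocks); // the block of segment "1" now precedes that of segment "2"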

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/block/TableTaskInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/block/TableTaskInfo.java b/core/src/main/java/org/carbondata/core/carbon/datastore/block/TableTaskInfo.java
deleted file mode 100644
index 0def27b..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/block/TableTaskInfo.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.carbondata.core.carbon.datastore.block;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-
-/**
- * This class is responsible for maintaining the mapping of tasks of a node.
- */
-public class TableTaskInfo extends Distributable {
-
-  private final List<TableBlockInfo> tableBlockInfoList;
-  private final String taskId;
-  public String getTaskId() {
-    return taskId;
-  }
-
-  public List<TableBlockInfo> getTableBlockInfoList() {
-    return tableBlockInfoList;
-  }
-
-  public TableTaskInfo(String taskId, List<TableBlockInfo> tableBlockInfoList){
-    this.taskId = taskId;
-    this.tableBlockInfoList = tableBlockInfoList;
-  }
-
-  @Override public String[] getLocations() {
-    Set<String> locations = new HashSet<String>();
-    for(TableBlockInfo tableBlockInfo: tableBlockInfoList){
-      locations.addAll(Arrays.asList(tableBlockInfo.getLocations()));
-    }
-    locations.toArray(new String[locations.size()]);
-    List<String> nodes =  TableTaskInfo.maxNoNodes(tableBlockInfoList);
-    return nodes.toArray(new String[nodes.size()]);
-  }
-
-  @Override public int compareTo(Distributable o) {
-    return taskId.compareTo(((TableTaskInfo)o).getTaskId());
-  }
-
-  /**
-   * Finding which node has the maximum number of blocks for it.
-   * @param blockList
-   * @return
-   */
-  public static List<String> maxNoNodes(List<TableBlockInfo> blockList) {
-    boolean useIndex = true;
-    Integer maxOccurence = 0;
-    String maxNode = null;
-    Map<String, Integer> nodeAndOccurenceMapping = new TreeMap<>();
-
-    // populate the map of node and number of occurrences of that node.
-    for (TableBlockInfo block : blockList) {
-      for (String node : block.getLocations()) {
-        Integer nodeOccurence = nodeAndOccurenceMapping.get(node);
-        if (null == nodeOccurence) {
-          nodeAndOccurenceMapping.put(node, 1);
-        } else {
-          // write the incremented count back to the map; incrementing the boxed
-          // local variable alone would lose the update
-          nodeAndOccurenceMapping.put(node, nodeOccurence + 1);
-        }
-      }
-    }
-    Integer previousValueOccurence = null;
-
-    // check which node occurred the maximum number of times.
-    for (Map.Entry<String, Integer> entry : nodeAndOccurenceMapping.entrySet()) {
-      // finding the maximum node.
-      if (entry.getValue() > maxOccurence) {
-        maxOccurence = entry.getValue();
-        maxNode = entry.getKey();
-      }
-      // first time scenario. initializing the previous value.
-      if (null == previousValueOccurence) {
-        previousValueOccurence = entry.getValue();
-      } else {
-        // for the case where all the nodes have the same number of blocks
-        // we need to return the complete list instead of the max node.
-        if (!previousValueOccurence.equals(entry.getValue())) {
-          useIndex = false;
-        }
-      }
-    }
-
-    // if all the nodes have equal occurrence then returning the complete key set.
-    if (useIndex) {
-      return new ArrayList<>(nodeAndOccurenceMapping.keySet());
-    }
-
-    // if any max node is found then returning the max node.
-    List<String> node =  new ArrayList<>(1);
-    node.add(maxNode);
-    return node;
-  }
-}
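
The intent of maxNoNodes is "pick the node hosting the most blocks, unless every node hosts the same number, in which case return them all". A standalone sketch of the counting part with modern map APIs (illustrative only):

    import java.util.Collections;
    import java.util.Map;
    import java.util.TreeMap;

    public class MaxNodeSketch {
      public static void main(String[] args) {
        Map<String, Integer> counts = new TreeMap<String, Integer>();
        for (String node : new String[] { "n1", "n2", "n1" }) {
          counts.merge(node, 1, Integer::sum); // count block occurrences per node
        }
        String maxNode =
            Collections.max(counts.entrySet(), Map.Entry.comparingByValue()).getKey();
        System.out.println(maxNode); // prints n1
      }
    }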

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/block/TaskBlockInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/block/TaskBlockInfo.java b/core/src/main/java/org/carbondata/core/carbon/datastore/block/TaskBlockInfo.java
deleted file mode 100644
index 3417f59..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/block/TaskBlockInfo.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.carbon.datastore.block;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-/**
- * This class contains blocks info of each task
- */
-public class TaskBlockInfo {
-
-  // stores TableBlockInfo list of each task
-  private Map<String, List<TableBlockInfo>> taskBlockInfoMapping;
-
-  public TaskBlockInfo(){
-
-    taskBlockInfoMapping = new HashMap<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-  }
-
-  /**
-   * returns task set
-   * @return
-   */
-  public Set<String> getTaskSet() {
-    return taskBlockInfoMapping.keySet();
-  }
-
-
-  /**
-   * returns TableBlockInfoList of given task
-   * @return
-   */
-  public List<TableBlockInfo> getTableBlockInfoList(String task) {
-    return taskBlockInfoMapping.get(task);
-  }
-
-  /**
-   *  maps TableBlockInfoList to respective task
-   * @param task
-   * @param tableBlockInfoList
-   */
-  public void addTableBlockInfoList(String task, List<TableBlockInfo> tableBlockInfoList) {
-    taskBlockInfoMapping.put(task, tableBlockInfoList);
-  }
-
-}
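
A short usage sketch (blockList stands in for a real List<TableBlockInfo> built elsewhere):

    // hypothetical grouping of blocks under a task id
    TaskBlockInfo taskBlockInfo = new TaskBlockInfo();
    taskBlockInfo.addTableBlockInfoList("task-0", blockList);
    for (String task : taskBlockInfo.getTaskSet()) {
      List<TableBlockInfo> blocksOfTask = taskBlockInfo.getTableBlockInfoList(task);
      // schedule all blocks of this task together on one executor
    }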

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/DimensionChunkAttributes.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/DimensionChunkAttributes.java b/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/DimensionChunkAttributes.java
deleted file mode 100644
index 997f54d..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/DimensionChunkAttributes.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.chunk;
-
-/**
- * Dimension chunk attributes which holds all the
- * property about the dimension chunk data
- */
-public class DimensionChunkAttributes {
-
-  /**
-   * inverted index of the data
-   */
-  private int[] invertedIndexes;
-
-  /**
-   * reverse index of the data
-   */
-  private int[] invertedIndexesReverse;
-
-  /**
-   * each row size
-   */
-  private int columnValueSize;
-
-  /**
-   * is no dictionary
-   */
-  private boolean isNoDictionary;
-
-  /**
-   * @return the invertedIndexes
-   */
-  public int[] getInvertedIndexes() {
-    return invertedIndexes;
-  }
-
-  /**
-   * @param invertedIndexes the invertedIndexes to set
-   */
-  public void setInvertedIndexes(int[] invertedIndexes) {
-    this.invertedIndexes = invertedIndexes;
-  }
-
-  /**
-   * @return the invertedIndexesReverse
-   */
-  public int[] getInvertedIndexesReverse() {
-    return invertedIndexesReverse;
-  }
-
-  /**
-   * @param invertedIndexesReverse the invertedIndexesReverse to set
-   */
-  public void setInvertedIndexesReverse(int[] invertedIndexesReverse) {
-    this.invertedIndexesReverse = invertedIndexesReverse;
-  }
-
-  /**
-   * @return the columnValueSize
-   */
-  public int getColumnValueSize() {
-    return columnValueSize;
-  }
-
-  /**
-   * @param eachRowSize the eachRowSize to set
-   */
-  public void setEachRowSize(int eachRowSize) {
-    this.columnValueSize = eachRowSize;
-  }
-
-  /**
-   * @return the isNoDictionary
-   */
-  public boolean isNoDictionary() {
-    return isNoDictionary;
-  }
-
-  /**
-   * @param isNoDictionary the isNoDictionary to set
-   */
-  public void setNoDictionary(boolean isNoDictionary) {
-    this.isNoDictionary = isNoDictionary;
-  }
-}
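
Reading the two index arrays together: invertedIndexes appears to map a sorted position back to the original row id, and invertedIndexesReverse the other way around, so one can be derived from the other. A sketch under that assumption:

    // assumption: inverted[sortedPos] == originalRowId
    int[] inverted = { 2, 0, 1 };
    int[] reverse = new int[inverted.length];
    for (int pos = 0; pos < inverted.length; pos++) {
      reverse[inverted[pos]] = pos; // originalRowId -> sortedPos
    }
    DimensionChunkAttributes attrs = new DimensionChunkAttributes();
    attrs.setInvertedIndexes(inverted);
    attrs.setInvertedIndexesReverse(reverse);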

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/DimensionColumnDataChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/DimensionColumnDataChunk.java b/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/DimensionColumnDataChunk.java
deleted file mode 100644
index 6a30bc7..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/DimensionColumnDataChunk.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.chunk;
-
-import org.carbondata.scan.executor.infos.KeyStructureInfo;
-
-/**
- * Interface for dimension column chunk.
- */
-public interface DimensionColumnDataChunk<T> {
-
-  /**
-   * Below method will be used to fill the data based on offset and row id
-   *
-   * @param data   data to be filled
-   * @param offset offset from which data need to be filled
-   * @param columnIndex column index of the chunk
-   * @return how many bytes were copied
-   */
-  int fillChunkData(byte[] data, int offset, int columnIndex, KeyStructureInfo restructuringInfo);
-
-  /**
-   * It is used to convert column data to dictionary integer value
-   * @param rowId
-   * @param columnIndex
-   * @param row
-   * @param restructuringInfo
-   * @return
-   */
-  int fillConvertedChunkData(int rowId, int columnIndex, int[] row,
-      KeyStructureInfo restructuringInfo);
-
-  /**
-   * Below method to get  the data based in row id
-   *
-   * @param row id
-   *            row id of the data
-   * @return chunk
-   */
-  byte[] getChunkData(int columnIndex);
-
-  /**
-   * Below method will be used to get the chunk attributes
-   *
-   * @return chunk attributes
-   */
-  DimensionChunkAttributes getAttributes();
-
-  /**
-   * Below method will be used to return the complete data chunk
-   * This will be required during filter query
-   *
-   * @return complete chunk
-   */
-  T getCompleteDataChunk();
-}
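
For a fixed-length dictionary chunk, getChunkData amounts to slicing columnValueSize bytes at the right offset. A hedged sketch of such an implementation (the real implementations live in the chunk/impl package, such as the class below):

    // illustrative body for a fixed-length chunk; not the actual implementation
    byte[] getChunkData(byte[] dataChunk, int columnValueSize, int index) {
      byte[] value = new byte[columnValueSize];
      // entries are laid out back to back, each occupying columnValueSize bytes
      System.arraycopy(dataChunk, index * columnValueSize, value, 0, columnValueSize);
      return value;
    }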

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/MeasureColumnDataChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/MeasureColumnDataChunk.java b/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/MeasureColumnDataChunk.java
deleted file mode 100644
index e093eb3..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/MeasureColumnDataChunk.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.chunk;
-
-import org.carbondata.core.carbon.metadata.blocklet.datachunk.PresenceMeta;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-
-/**
- * Holder for a measure column chunk;
- * it will have the data and its attributes, which will
- * be required for processing
- */
-public class MeasureColumnDataChunk {
-
-  /**
-   * measure chunk
-   */
-  private CarbonReadDataHolder measureDataHolder;
-
-  /**
-   * holds the information about the null value indexes,
-   * which will be helpful for removing null values during aggregation
-   */
-  private PresenceMeta nullValueIndexHolder;
-
-  /**
-   * @return the measureDataHolder
-   */
-  public CarbonReadDataHolder getMeasureDataHolder() {
-    return measureDataHolder;
-  }
-
-  /**
-   * @param measureDataHolder the measureDataHolder to set
-   */
-  public void setMeasureDataHolder(CarbonReadDataHolder measureDataHolder) {
-    this.measureDataHolder = measureDataHolder;
-  }
-
-  /**
-   * @return the nullValueIndexHolder
-   */
-  public PresenceMeta getNullValueIndexHolder() {
-    return nullValueIndexHolder;
-  }
-
-  /**
-   * @param nullValueIndexHolder the nullValueIndexHolder to set
-   */
-  public void setNullValueIndexHolder(PresenceMeta nullValueIndexHolder) {
-    this.nullValueIndexHolder = nullValueIndexHolder;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/impl/ColumnGroupDimensionDataChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/impl/ColumnGroupDimensionDataChunk.java b/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/impl/ColumnGroupDimensionDataChunk.java
deleted file mode 100644
index d8536a1..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/impl/ColumnGroupDimensionDataChunk.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.chunk.impl;
-
-import org.carbondata.core.carbon.datastore.chunk.DimensionChunkAttributes;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.scan.executor.infos.KeyStructureInfo;
-
-/**
- * This class is a holder of the dimension column chunk data of a row-major
- * column group with fixed length key size
- */
-public class ColumnGroupDimensionDataChunk implements DimensionColumnDataChunk<byte[]> {
-
-  /**
-   * dimension chunk attributes
-   */
-  private DimensionChunkAttributes chunkAttributes;
-
-  /**
-   * data chunks
-   */
-  private byte[] dataChunk;
-
-  /**
-   * Constructor for this class
-   *
-   * @param dataChunk       data chunk
-   * @param chunkAttributes chunk attributes
-   */
-  public ColumnGroupDimensionDataChunk(byte[] dataChunk, DimensionChunkAttributes chunkAttributes) {
-    this.chunkAttributes = chunkAttributes;
-    this.dataChunk = dataChunk;
-  }
-
-  /**
-   * Below method will be used to fill the data based on offset and row id
-   *
-   * @param data              data to be filled
-   * @param offset            offset from which data needs to be filled
-   * @param rowId             row id of the chunk
-   * @param restructuringInfo define the structure of the key
-   * @return how many bytes were copied
-   */
-  @Override public int fillChunkData(byte[] data, int offset, int rowId,
-      KeyStructureInfo restructuringInfo) {
-    byte[] maskedKey =
-        getMaskedKey(dataChunk, rowId * chunkAttributes.getColumnValueSize(), restructuringInfo);
-    System.arraycopy(maskedKey, 0, data, offset, maskedKey.length);
-    return maskedKey.length;
-  }
-
-  /**
-   * Converts to column dictionary integer value
-   */
-  @Override public int fillConvertedChunkData(int rowId, int columnIndex, int[] row,
-      KeyStructureInfo info) {
-    int start = rowId * chunkAttributes.getColumnValueSize();
-    long[] keyArray = info.getKeyGenerator().getKeyArray(dataChunk, start);
-    int[] ordinal = info.getMdkeyQueryDimensionOrdinal();
-    for (int i = 0; i < ordinal.length; i++) {
-      row[columnIndex++] = (int)keyArray[ordinal[i]];
-    }
-    return columnIndex;
-  }
-
-  /**
-   * Below method will be used to mask the key based on the masked
-   * byte ranges and max key described by the KeyStructureInfo
-   */
-  public byte[] getMaskedKey(byte[] data, int offset, KeyStructureInfo info) {
-    byte[] maskedKey = new byte[info.getMaskByteRanges().length];
-    int counter = 0;
-    int byteRange = 0;
-    for (int i = 0; i < info.getMaskByteRanges().length; i++) {
-      byteRange = info.getMaskByteRanges()[i];
-      maskedKey[counter++] = (byte) (data[byteRange + offset] & info.getMaxKey()[byteRange]);
-    }
-    return maskedKey;
-  }
-
-  /**
-   * Below method will be used to get the data based on row id
-   *
-   * @param rowId row id of the data
-   * @return chunk
-   */
-  @Override public byte[] getChunkData(int rowId) {
-    byte[] data = new byte[chunkAttributes.getColumnValueSize()];
-    System.arraycopy(dataChunk, rowId * data.length, data, 0, data.length);
-    return data;
-  }
-
-  /**
-   * Below method will be used get the chunk attributes
-   *
-   * @return chunk attributes
-   */
-  @Override public DimensionChunkAttributes getAttributes() {
-    return chunkAttributes;
-  }
-
-  /**
-   * Below method will be used to return the complete data chunk
-   * This will be required during filter query
-   *
-   * @return complete chunk
-   */
-  @Override public byte[] getCompleteDataChunk() {
-    return dataChunk;
-  }
-}
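
The masking above is the interesting part of this class: a column group packs
several dimensions row-major into one mdkey, and only the bytes covered by the
query's mask ranges are copied out. A standalone sketch of the same
arithmetic, with the ranges and max key passed in directly instead of through
a KeyStructureInfo (names here are illustrative):

  // Pick the bytes at the masked ranges out of one row's packed mdkey and
  // AND each with the max key, as getMaskedKey does.
  static byte[] maskKey(byte[] mdkey, int offset, int[] maskByteRanges, byte[] maxKey) {
    byte[] masked = new byte[maskByteRanges.length];
    for (int i = 0; i < maskByteRanges.length; i++) {
      int byteRange = maskByteRanges[i];
      masked[i] = (byte) (mdkey[byteRange + offset] & maxKey[byteRange]);
    }
    return masked;
  }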

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/impl/FixedLengthDimensionDataChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/impl/FixedLengthDimensionDataChunk.java b/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/impl/FixedLengthDimensionDataChunk.java
deleted file mode 100644
index cbe48b2..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/impl/FixedLengthDimensionDataChunk.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.chunk.impl;
-
-import org.carbondata.core.carbon.datastore.chunk.DimensionChunkAttributes;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.scan.executor.infos.KeyStructureInfo;
-
-/**
- * This class is a holder of the dimension column chunk data of fixed
- * length key size
- */
-public class FixedLengthDimensionDataChunk implements DimensionColumnDataChunk<byte[]> {
-
-  /**
-   * dimension chunk attributes
-   */
-  private DimensionChunkAttributes chunkAttributes;
-
-  /**
-   * data chunks
-   */
-  private byte[] dataChunk;
-
-  /**
-   * Constructor for this class
-   *
-   * @param dataChunk       data chunk
-   * @param chunkAttributes chunk attributes
-   */
-  public FixedLengthDimensionDataChunk(byte[] dataChunk, DimensionChunkAttributes chunkAttributes) {
-    this.chunkAttributes = chunkAttributes;
-    this.dataChunk = dataChunk;
-  }
-
-  /**
-   * Below method will be used to fill the data based on offset and row id
-   *
-   * @param data             data to be filled
-   * @param offset           offset from which data needs to be filled
-   * @param index            row id of the chunk
-   * @param keyStructureInfo define the structure of the key
-   * @return how many bytes were copied
-   */
-  @Override public int fillChunkData(byte[] data, int offset, int index,
-      KeyStructureInfo keyStructureInfo) {
-    if (chunkAttributes.getInvertedIndexes() != null) {
-      index = chunkAttributes.getInvertedIndexesReverse()[index];
-    }
-    System.arraycopy(dataChunk, index * chunkAttributes.getColumnValueSize(), data, offset,
-        chunkAttributes.getColumnValueSize());
-    return chunkAttributes.getColumnValueSize();
-  }
-
-  /**
-   * Converts to column dictionary integer value
-   */
-  @Override public int fillConvertedChunkData(int rowId, int columnIndex, int[] row,
-      KeyStructureInfo restructuringInfo) {
-    if (chunkAttributes.getInvertedIndexes() != null) {
-      rowId = chunkAttributes.getInvertedIndexesReverse()[rowId];
-    }
-    int start = rowId * chunkAttributes.getColumnValueSize();
-    int dict = 0;
-    for (int i = start; i < start + chunkAttributes.getColumnValueSize(); i++) {
-      dict <<= 8;
-      dict ^= dataChunk[i] & 0xFF;
-    }
-    row[columnIndex] = dict;
-    return columnIndex + 1;
-  }
-
-  /**
-   * Below method will be used to get the data based on row id
-   *
-   * @param index row id of the data
-   * @return chunk
-   */
-  @Override public byte[] getChunkData(int index) {
-    byte[] data = new byte[chunkAttributes.getColumnValueSize()];
-    if (chunkAttributes.getInvertedIndexes() != null) {
-      index = chunkAttributes.getInvertedIndexesReverse()[index];
-    }
-    System.arraycopy(dataChunk, index * chunkAttributes.getColumnValueSize(), data, 0,
-        chunkAttributes.getColumnValueSize());
-    return data;
-  }
-
-  /**
-   * Below method will be used get the chunk attributes
-   *
-   * @return chunk attributes
-   */
-  @Override public DimensionChunkAttributes getAttributes() {
-    return chunkAttributes;
-  }
-
-  /**
-   * Below method will be used to return the complete data chunk
-   * This will be required during filter query
-   *
-   * @return complete chunk
-   */
-  @Override public byte[] getCompleteDataChunk() {
-    return dataChunk;
-  }
-}
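
fillConvertedChunkData above is a big-endian decode of a fixed-width byte
sequence into the dictionary surrogate: for a 2-byte column value
{0x01, 0x2C} the shift-and-xor loop yields (0x01 << 8) ^ 0x2C = 300. The same
decode as a standalone helper (hypothetical name, same logic):

  // Decode one fixed-length column value into its dictionary id.
  static int decodeSurrogate(byte[] dataChunk, int rowId, int columnValueSize) {
    int start = rowId * columnValueSize;
    int dict = 0;
    for (int i = start; i < start + columnValueSize; i++) {
      dict <<= 8;
      dict ^= dataChunk[i] & 0xFF;
    }
    return dict;
  }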

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/impl/VariableLengthDimensionDataChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/impl/VariableLengthDimensionDataChunk.java b/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/impl/VariableLengthDimensionDataChunk.java
deleted file mode 100644
index 7c69490..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/impl/VariableLengthDimensionDataChunk.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.chunk.impl;
-
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.chunk.DimensionChunkAttributes;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.scan.executor.infos.KeyStructureInfo;
-
-/**
- * This class is a holder of the dimension column chunk data of variable
- * length key size
- */
-public class VariableLengthDimensionDataChunk implements DimensionColumnDataChunk<List<byte[]>> {
-
-  /**
-   * dimension chunk attributes
-   */
-  private DimensionChunkAttributes chunkAttributes;
-
-  /**
-   * data chunk
-   */
-  private List<byte[]> dataChunk;
-
-  /**
-   * Constructor for this class
-   *
-   * @param dataChunk       data chunk
-   * @param chunkAttributes chunk attributes
-   */
-  public VariableLengthDimensionDataChunk(List<byte[]> dataChunk,
-      DimensionChunkAttributes chunkAttributes) {
-    this.chunkAttributes = chunkAttributes;
-    this.dataChunk = dataChunk;
-  }
-
-  /**
-   * Below method will be used to fill the data based on offset and row id
-   *
-   * @param data              data to be filled
-   * @param offset            offset from which data needs to be filled
-   * @param index             row id of the chunk
-   * @param restructuringInfo define the structure of the key
-   * @return how many bytes were copied
-   */
-  @Override public int fillChunkData(byte[] data, int offset, int index,
-      KeyStructureInfo restructuringInfo) {
-    // not required in this case because this column chunk is not part of
-    // the mdkey
-    return 0;
-  }
-
-  /**
-   * Converts to column dictionary integer value
-   *
-   * @param rowId             row id of the chunk
-   * @param columnIndex       index of the output column
-   * @param row               output row to fill
-   * @param restructuringInfo define the structure of the key
-   * @return the next column index to be filled
-   */
-  @Override public int fillConvertedChunkData(int rowId, int columnIndex, int[] row,
-      KeyStructureInfo restructuringInfo) {
-    return columnIndex + 1;
-  }
-
-  /**
-   * Below method will be used to get the data based on row id
-   *
-   * @param index row id of the data
-   * @return chunk
-   */
-  @Override public byte[] getChunkData(int index) {
-    if (null != chunkAttributes.getInvertedIndexes()) {
-      index = chunkAttributes.getInvertedIndexesReverse()[index];
-    }
-    return dataChunk.get(index);
-  }
-
-  /**
-   * Below method will be used get the chunk attributes
-   *
-   * @return chunk attributes
-   */
-  @Override public DimensionChunkAttributes getAttributes() {
-    return chunkAttributes;
-  }
-
-  /**
-   * Below method will be used to return the complete data chunk
-   * This will be required during filter query
-   *
-   * @return complete chunk
-   */
-  @Override public List<byte[]> getCompleteDataChunk() {
-    return dataChunk;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/DimensionColumnChunkReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/DimensionColumnChunkReader.java b/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/DimensionColumnChunkReader.java
deleted file mode 100644
index bf931a0..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/DimensionColumnChunkReader.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.chunk.reader;
-
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.datastorage.store.FileHolder;
-
-/**
- * Interface for reading a data chunk.
- * Its concrete implementations can be used to read
- * compressed or uncompressed chunks.
- */
-public interface DimensionColumnChunkReader {
-
-  /**
-   * Below method will be used to read the chunk based on block indexes
-   *
-   * @param fileReader   file reader to read the blocks from file
-   * @param blockIndexes blocks to be read
-   * @return dimension column chunks
-   */
-  DimensionColumnDataChunk[] readDimensionChunks(FileHolder fileReader, int... blockIndexes);
-
-  /**
-   * Below method will be used to read the chunk based on block index
-   *
-   * @param fileReader file reader to read the blocks from file
-   * @param blockIndex block to be read
-   * @return dimension column chunk
-   */
-  DimensionColumnDataChunk readDimensionChunk(FileHolder fileReader, int blockIndex);
-}
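
Callers ask only for the blocks they project. A short usage sketch, assuming
a reader implementation and a FileHolder created elsewhere (variable names
are hypothetical):

  // Fetch dimension blocks 0 and 3 only. The returned array is indexed by
  // block index, so slots that were not requested stay null.
  DimensionColumnDataChunk[] chunks = reader.readDimensionChunks(fileReader, 0, 3);
  byte[] firstRowOfBlock0 = chunks[0].getChunkData(0);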

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/MeasureColumnChunkReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/MeasureColumnChunkReader.java b/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/MeasureColumnChunkReader.java
deleted file mode 100644
index 37f20c9..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/MeasureColumnChunkReader.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.chunk.reader;
-
-import org.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
-import org.carbondata.core.datastorage.store.FileHolder;
-
-/**
- * Reader interface for reading the measure blocks from file
- */
-public interface MeasureColumnChunkReader {
-
-  /**
-   * Method to read the blocks data based on block indexes
-   *
-   * @param fileReader   file reader to read the blocks
-   * @param blockIndexes blocks to be read
-   * @return measure data chunks
-   */
-  MeasureColumnDataChunk[] readMeasureChunks(FileHolder fileReader, int... blockIndexes);
-
-  /**
-   * Method to read the blocks data based on block index
-   *
-   * @param fileReader file reader to read the blocks
-   * @param blockIndex block to be read
-   * @return measure data chunk
-   */
-  MeasureColumnDataChunk readMeasureChunk(FileHolder fileReader, int blockIndex);
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/dimension/AbstractChunkReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/dimension/AbstractChunkReader.java b/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/dimension/AbstractChunkReader.java
deleted file mode 100644
index f27a3e0..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/dimension/AbstractChunkReader.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.chunk.reader.dimension;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.chunk.reader.DimensionColumnChunkReader;
-import org.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.keygenerator.mdkey.NumberCompressor;
-import org.carbondata.core.util.CarbonProperties;
-
-/**
- * Class which will have all the common properties and behavior among all
- * types of readers
- */
-public abstract class AbstractChunkReader implements DimensionColumnChunkReader {
-
-  /**
-   * compressor will be used to uncompress the data
-   */
-  protected static final Compressor<byte[]> COMPRESSOR =
-      SnappyCompression.SnappyByteCompression.INSTANCE;
-
-  /**
-   * data chunk list which holds the information
-   * about the data block metadata
-   */
-  protected List<DataChunk> dimensionColumnChunk;
-
-  /**
-   * size of each column value;
-   * for a no dictionary column it will be -1
-   */
-  protected int[] eachColumnValueSize;
-
-  /**
-   * full qualified path of the data file from
-   * which data will be read
-   */
-  protected String filePath;
-
-  /**
-   * this will be used to uncompress the
-   * row id and rle chunk
-   */
-  protected NumberCompressor numberComressor;
-
-  /**
-   * number of elements in each chunk
-   */
-  private int numberOfElement;
-
-  /**
-   * Constructor taking the minimum parameters needed to create
-   * an instance of this class
-   *
-   * @param dimensionColumnChunk dimension chunk metadata
-   * @param eachColumnValueSize  size of the each column value
-   * @param filePath             file from which data will be read
-   */
-  public AbstractChunkReader(List<DataChunk> dimensionColumnChunk, int[] eachColumnValueSize,
-      String filePath) {
-    this.dimensionColumnChunk = dimensionColumnChunk;
-    this.eachColumnValueSize = eachColumnValueSize;
-    this.filePath = filePath;
-    // assign the numberOfElement field (not a shadowing local) so that
-    // getNoDictionaryDataChunk later sizes its list correctly
-    try {
-      numberOfElement = Integer.parseInt(CarbonProperties.getInstance()
-          .getProperty(CarbonCommonConstants.BLOCKLET_SIZE,
-              CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL));
-    } catch (NumberFormatException exception) {
-      numberOfElement = Integer.parseInt(CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL);
-    }
-    this.numberComressor = new NumberCompressor(numberOfElement);
-  }
-
-  /**
-   * Below method will be used to create the reverse inverted index,
-   * which will be used to point to the actual data in the chunk
-   *
-   * @param invertedIndex inverted index
-   * @return reverse inverted index
-   */
-  protected int[] getInvertedReverseIndex(int[] invertedIndex) {
-    int[] columnIndexTemp = new int[invertedIndex.length];
-
-    for (int i = 0; i < invertedIndex.length; i++) {
-      columnIndexTemp[invertedIndex[i]] = i;
-    }
-    return columnIndexTemp;
-  }
-
-  /**
-   * In case of a no dictionary column the size of each column value
-   * is not the same, and the length is stored along with each value,
-   * so a filter query cannot take advantage of binary search directly;
-   * converting this data to a two dimensional array format makes
-   * filter query processing faster
-   *
-   * @param dataChunkWithLength no dictionary column chunk in the layout
-   *                            <Length><Data><Length><Data>,
-   *                            where each length is stored in 2 bytes
-   * @return list of data chunks, one value in the list per column value
-   */
-  protected List<byte[]> getNoDictionaryDataChunk(byte[] dataChunkWithLength) {
-    List<byte[]> dataChunk = new ArrayList<byte[]>(numberOfElement);
-    // wrapping the chunk to byte buffer
-    ByteBuffer buffer = ByteBuffer.wrap(dataChunkWithLength);
-    buffer.rewind();
-    byte[] data = null;
-    // iterating till all the elements are read
-    while (buffer.hasRemaining()) {
-      // every value is stored with a 2-byte length prefix,
-      // so first read the size and then read that many
-      // bytes as the actual value
-      data = new byte[buffer.getShort()];
-      buffer.get(data);
-      dataChunk.add(data);
-    }
-    return dataChunk;
-  }
-}
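
The <Length><Data> layout parsed by getNoDictionaryDataChunk can be exercised
end to end. A minimal round trip, written against plain java.nio rather than
any Carbon class (the demo class name is made up), that encodes two values
with 2-byte length prefixes and reads them back the way the method does:

  import java.nio.ByteBuffer;
  import java.nio.charset.StandardCharsets;
  import java.util.ArrayList;
  import java.util.List;

  public final class NoDictionaryLayoutDemo {
    public static void main(String[] args) {
      byte[] a = "spark".getBytes(StandardCharsets.UTF_8);
      byte[] b = "carbon".getBytes(StandardCharsets.UTF_8);
      // encode: <Length><Data><Length><Data>, each length in 2 bytes
      ByteBuffer out = ByteBuffer.allocate(2 + a.length + 2 + b.length);
      out.putShort((short) a.length).put(a).putShort((short) b.length).put(b);

      // decode: read a 2-byte size, then that many bytes, until exhausted
      ByteBuffer in = ByteBuffer.wrap(out.array());
      List<byte[]> values = new ArrayList<>();
      while (in.hasRemaining()) {
        byte[] value = new byte[in.getShort()];
        in.get(value);
        values.add(value);
      }
      // values.get(0) is "spark", values.get(1) is "carbon"
    }
  }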

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/dimension/CompressedDimensionChunkFileBasedReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/dimension/CompressedDimensionChunkFileBasedReader.java b/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/dimension/CompressedDimensionChunkFileBasedReader.java
deleted file mode 100644
index 6e6f566..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/dimension/CompressedDimensionChunkFileBasedReader.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.chunk.reader.dimension;
-
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.chunk.DimensionChunkAttributes;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.impl.ColumnGroupDimensionDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.impl.VariableLengthDimensionDataChunk;
-import org.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.columnar.UnBlockIndexer;
-import org.carbondata.core.util.CarbonUtil;
-
-/**
- * Compressed dimension chunk reader class
- */
-public class CompressedDimensionChunkFileBasedReader extends AbstractChunkReader {
-
-  /**
-   * Constructor taking the minimum parameters needed to create an instance of this class
-   *
-   * @param dimensionColumnChunk dimension chunk metadata
-   * @param eachColumnValueSize  size of the each column value
-   * @param filePath             file from which data will be read
-   */
-  public CompressedDimensionChunkFileBasedReader(List<DataChunk> dimensionColumnChunk,
-      int[] eachColumnValueSize, String filePath) {
-    super(dimensionColumnChunk, eachColumnValueSize, filePath);
-  }
-
-  /**
-   * Below method will be used to read the chunk based on block indexes
-   *
-   * @param fileReader   file reader to read the blocks from file
-   * @param blockIndexes blocks to be read
-   * @return dimension column chunks
-   */
-  @Override public DimensionColumnDataChunk[] readDimensionChunks(FileHolder fileReader,
-      int... blockIndexes) {
-    // read the column chunk for each block index and add it to the array
-    DimensionColumnDataChunk[] dataChunks =
-        new DimensionColumnDataChunk[dimensionColumnChunk.size()];
-    for (int i = 0; i < blockIndexes.length; i++) {
-      dataChunks[blockIndexes[i]] = readDimensionChunk(fileReader, blockIndexes[i]);
-    }
-    return dataChunks;
-  }
-
-  /**
-   * Below method will be used to read the chunk based on block index
-   *
-   * @param fileReader file reader to read the blocks from file
-   * @param blockIndex block to be read
-   * @return dimension column chunk
-   */
-  @Override public DimensionColumnDataChunk readDimensionChunk(FileHolder fileReader,
-      int blockIndex) {
-    byte[] dataPage = null;
-    int[] invertedIndexes = null;
-    int[] invertedIndexesReverse = null;
-    int[] rlePage = null;
-
-    // first read the data and uncompress it
-    dataPage = COMPRESSOR.unCompress(fileReader
-        .readByteArray(filePath, dimensionColumnChunk.get(blockIndex).getDataPageOffset(),
-            dimensionColumnChunk.get(blockIndex).getDataPageLength()));
-    // if row id block is present then read the row id chunk and uncompress it
-    if (CarbonUtil.hasEncoding(dimensionColumnChunk.get(blockIndex).getEncodingList(),
-        Encoding.INVERTED_INDEX)) {
-      invertedIndexes = CarbonUtil
-          .getUnCompressColumnIndex(dimensionColumnChunk.get(blockIndex).getRowIdPageLength(),
-              fileReader.readByteArray(filePath,
-                  dimensionColumnChunk.get(blockIndex).getRowIdPageOffset(),
-                  dimensionColumnChunk.get(blockIndex).getRowIdPageLength()), numberComressor);
-      // get the reverse index
-      invertedIndexesReverse = getInvertedReverseIndex(invertedIndexes);
-    }
-    // if RLE is applied then read the RLE block chunk and then
-    // uncompress the actual data based on the RLE block
-    if (CarbonUtil
-        .hasEncoding(dimensionColumnChunk.get(blockIndex).getEncodingList(), Encoding.RLE)) {
-      // read and uncompress the rle block
-      rlePage = numberComressor.unCompress(fileReader
-          .readByteArray(filePath, dimensionColumnChunk.get(blockIndex).getRlePageOffset(),
-              dimensionColumnChunk.get(blockIndex).getRlePageLength()));
-      // uncompress the data with rle indexes
-      dataPage = UnBlockIndexer.uncompressData(dataPage, rlePage, eachColumnValueSize[blockIndex]);
-      rlePage = null;
-    }
-    // fill chunk attributes
-    DimensionChunkAttributes chunkAttributes = new DimensionChunkAttributes();
-    chunkAttributes.setEachRowSize(eachColumnValueSize[blockIndex]);
-    chunkAttributes.setInvertedIndexes(invertedIndexes);
-    chunkAttributes.setInvertedIndexesReverse(invertedIndexesReverse);
-    DimensionColumnDataChunk columnDataChunk = null;
-
-    if (dimensionColumnChunk.get(blockIndex).isRowMajor()) {
-      // to store fixed length column chunk values
-      columnDataChunk = new ColumnGroupDimensionDataChunk(dataPage, chunkAttributes);
-    }
-    // if no dictionary column then first create a no dictionary column chunk
-    // and set to data chunk instance
-    else if (!CarbonUtil
-        .hasEncoding(dimensionColumnChunk.get(blockIndex).getEncodingList(), Encoding.DICTIONARY)) {
-      columnDataChunk =
-          new VariableLengthDimensionDataChunk(getNoDictionaryDataChunk(dataPage), chunkAttributes);
-      chunkAttributes.setNoDictionary(true);
-    } else {
-      // to store fixed length column chunk values
-      columnDataChunk = new FixedLengthDimensionDataChunk(dataPage, chunkAttributes);
-    }
-    return columnDataChunk;
-  }
-
-}
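
The tail of readDimensionChunk is a three-way dispatch on the chunk metadata:
row-major chunks become ColumnGroupDimensionDataChunk, chunks without the
DICTIONARY encoding become VariableLengthDimensionDataChunk, and everything
else becomes FixedLengthDimensionDataChunk. A condensed sketch with the
metadata checks reduced to plain booleans (hypothetical helper, for
illustration only):

  static DimensionColumnDataChunk<?> pickChunk(byte[] dataPage,
      DimensionChunkAttributes attrs, boolean rowMajor,
      boolean hasDictionaryEncoding, List<byte[]> noDictionaryValues) {
    if (rowMajor) {
      // column group: several dimensions packed into one fixed-width page
      return new ColumnGroupDimensionDataChunk(dataPage, attrs);
    } else if (!hasDictionaryEncoding) {
      // no dictionary: variable-length values parsed from <Length><Data>
      attrs.setNoDictionary(true);
      return new VariableLengthDimensionDataChunk(noDictionaryValues, attrs);
    } else {
      // plain fixed-length dictionary column
      return new FixedLengthDimensionDataChunk(dataPage, attrs);
    }
  }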


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForInt.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForInt.java b/core/src/main/java/org/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForInt.java
deleted file mode 100644
index 66fbfbf..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForInt.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.columnar;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.util.ByteUtil;
-
-public class BlockIndexerStorageForInt implements IndexStorage<int[]> {
-  private boolean alreadySorted;
-
-  private int[] dataAfterComp;
-
-  private int[] indexMap;
-
-  private byte[][] keyBlock;
-
-  private int[] dataIndexMap;
-
-  private int totalSize;
-
-  public BlockIndexerStorageForInt(byte[][] keyBlock, boolean compressData, boolean isNoDictionary,
-      boolean isSortRequired) {
-    ColumnWithIntIndex[] columnWithIndexs = createColumnWithIndexArray(keyBlock, isNoDictionary);
-    if (isSortRequired) {
-      Arrays.sort(columnWithIndexs);
-    }
-    compressMyOwnWay(extractDataAndReturnIndexes(columnWithIndexs, keyBlock));
-    if (compressData) {
-      compressDataMyOwnWay(columnWithIndexs);
-    }
-  }
-
-  /**
-   * Create an object with each column array and its respective row index
-   *
-   * @return columns paired with their row indexes
-   */
-  private ColumnWithIntIndex[] createColumnWithIndexArray(byte[][] keyBlock,
-      boolean isNoDictionary) {
-    ColumnWithIntIndex[] columnWithIndexs;
-    if (isNoDictionary) {
-      columnWithIndexs = new ColumnWithIntIndexForHighCard[keyBlock.length];
-      for (int i = 0; i < columnWithIndexs.length; i++) {
-        columnWithIndexs[i] = new ColumnWithIntIndexForHighCard(keyBlock[i], i);
-      }
-
-    } else {
-      columnWithIndexs = new ColumnWithIntIndex[keyBlock.length];
-      for (int i = 0; i < columnWithIndexs.length; i++) {
-        columnWithIndexs[i] = new ColumnWithIntIndex(keyBlock[i], i);
-      }
-    }
-
-    return columnWithIndexs;
-  }
-
-  private int[] extractDataAndReturnIndexes(ColumnWithIntIndex[] columnWithIndexs,
-      byte[][] keyBlock) {
-    int[] indexes = new int[columnWithIndexs.length];
-    for (int i = 0; i < indexes.length; i++) {
-      indexes[i] = columnWithIndexs[i].getIndex();
-      keyBlock[i] = columnWithIndexs[i].getColumn();
-    }
-    this.keyBlock = keyBlock;
-    return indexes;
-  }
-
-  /**
-   * It compresses based on runs of sequential numbers.
-   * [1,2,3,4,6,8,10,11,12,13] is translated to [1,4,6,8,10,13] and [0,4].
-   * The first array keeps the start and end of each sequential run, and the
-   * second array keeps the positions in the first array where sequential
-   * runs start. If there are no sequential numbers then the same array is
-   * returned with an empty second array.
-   *
-   * @param indexes row indexes to compress
-   */
-  public void compressMyOwnWay(int[] indexes) {
-    List<Integer> list = new ArrayList<Integer>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-    List<Integer> map = new ArrayList<Integer>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-    int k = 0;
-    int i = 1;
-    for (; i < indexes.length; i++) {
-      if (indexes[i] - indexes[i - 1] == 1) {
-        k++;
-      } else {
-        if (k > 0) {
-          map.add((list.size()));
-          list.add(indexes[i - k - 1]);
-          list.add(indexes[i - 1]);
-        } else {
-          list.add(indexes[i - 1]);
-        }
-        k = 0;
-      }
-    }
-    if (k > 0) {
-      map.add((list.size()));
-      list.add(indexes[i - k - 1]);
-      list.add(indexes[i - 1]);
-    } else {
-      list.add(indexes[i - 1]);
-    }
-    dataAfterComp = convertToArray(list);
-    if (indexes.length == dataAfterComp.length) {
-      indexMap = new int[0];
-    } else {
-      indexMap = convertToArray(map);
-    }
-    if (dataAfterComp.length == 2 && indexMap.length == 1) {
-      alreadySorted = true;
-    }
-  }
-
-  private int[] convertToArray(List<Integer> list) {
-    int[] shortArray = new int[list.size()];
-    for (int i = 0; i < shortArray.length; i++) {
-      shortArray[i] = list.get(i);
-    }
-    return shortArray;
-  }
-
-  /**
-   * @return the alreadySorted
-   */
-  public boolean isAlreadySorted() {
-    return alreadySorted;
-  }
-
-  /**
-   * @return the dataAfterComp
-   */
-  public int[] getDataAfterComp() {
-    return dataAfterComp;
-  }
-
-  /**
-   * @return the indexMap
-   */
-  public int[] getIndexMap() {
-    return indexMap;
-  }
-
-  /**
-   * @return the keyBlock
-   */
-  public byte[][] getKeyBlock() {
-    return keyBlock;
-  }
-
-  private void compressDataMyOwnWay(ColumnWithIntIndex[] indexes) {
-    byte[] prvKey = indexes[0].getColumn();
-    List<ColumnWithIntIndex> list =
-        new ArrayList<ColumnWithIntIndex>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-    list.add(indexes[0]);
-    int counter = 1;
-    int start = 0;
-    List<Integer> map = new ArrayList<Integer>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-    for (int i = 1; i < indexes.length; i++) {
-      if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(prvKey, indexes[i].getColumn()) != 0) {
-        prvKey = indexes[i].getColumn();
-        list.add(indexes[i]);
-        map.add(start);
-        map.add(counter);
-        start += counter;
-        counter = 1;
-        continue;
-      }
-      counter++;
-    }
-    map.add(start);
-    map.add(counter);
-    this.keyBlock = convertToKeyArray(list);
-    if (indexes.length == keyBlock.length) {
-      dataIndexMap = new int[0];
-    } else {
-      dataIndexMap = convertToArray(map);
-    }
-  }
-
-  private byte[][] convertToKeyArray(List<ColumnWithIntIndex> list) {
-    byte[][] shortArray = new byte[list.size()][];
-    for (int i = 0; i < shortArray.length; i++) {
-      shortArray[i] = list.get(i).getColumn();
-      totalSize += shortArray[i].length;
-    }
-    return shortArray;
-  }
-
-  @Override public int[] getDataIndexMap() {
-    return dataIndexMap;
-  }
-
-  @Override public int getTotalSize() {
-    return totalSize;
-  }
-
-  @Override public byte[] getMin() {
-    return keyBlock[0];
-  }
-
-  @Override public byte[] getMax() {
-    return keyBlock[keyBlock.length - 1];
-  }
-}
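
The sequence folding in compressMyOwnWay is easiest to verify by running it on
the Javadoc example. A self-contained copy of the algorithm (the demo class
name is made up):

  import java.util.ArrayList;
  import java.util.List;

  public final class SequenceFoldDemo {
    public static void main(String[] args) {
      int[] indexes = {1, 2, 3, 4, 6, 8, 10, 11, 12, 13};
      List<Integer> list = new ArrayList<>();   // folded values
      List<Integer> map = new ArrayList<>();    // positions of run pairs
      int k = 0;
      int i = 1;
      for (; i < indexes.length; i++) {
        if (indexes[i] - indexes[i - 1] == 1) {
          k++;                                  // still inside a run
        } else {
          if (k > 0) {                          // close the run as (first, last)
            map.add(list.size());
            list.add(indexes[i - k - 1]);
            list.add(indexes[i - 1]);
          } else {
            list.add(indexes[i - 1]);
          }
          k = 0;
        }
      }
      if (k > 0) {                              // flush the trailing run
        map.add(list.size());
        list.add(indexes[i - k - 1]);
        list.add(indexes[i - 1]);
      } else {
        list.add(indexes[i - 1]);
      }
      System.out.println(list);                 // [1, 4, 6, 8, 10, 13]
      System.out.println(map);                  // [0, 4]
    }
  }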

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForNoInvertedIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForNoInvertedIndex.java b/core/src/main/java/org/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForNoInvertedIndex.java
deleted file mode 100644
index 175af66..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForNoInvertedIndex.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.columnar;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.List;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.util.ByteUtil;
-
-public class BlockIndexerStorageForNoInvertedIndex implements IndexStorage<int[]> {
-  private byte[][] keyBlock;
-  private byte[][] sortedBlock;
-  private int totalSize;
-  private int[] dataIndexMap;
-
-  public BlockIndexerStorageForNoInvertedIndex(byte[][] keyBlockInput, boolean compressData,
-      boolean isNoDictionary) {
-    // without inverted index, but RLE can still be applied
-    if (compressData) {
-      // with RLE
-      byte[] prvKey = keyBlockInput[0];
-      List<byte[]> list = new ArrayList<byte[]>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-      list.add(keyBlockInput[0]);
-      int counter = 1;
-      int start = 0;
-      List<Integer> map = new ArrayList<Integer>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-      int length = keyBlockInput.length;
-      for(int i = 1; i < length; i++) {
-        if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(prvKey, keyBlockInput[i]) != 0) {
-          prvKey = keyBlockInput[i];
-          list.add(keyBlockInput[i]);
-          map.add(start);
-          map.add(counter);
-          start += counter;
-          counter = 1;
-          continue;
-        }
-        counter++;
-      }
-      map.add(start);
-      map.add(counter);
-      this.keyBlock = convertToKeyArray(list);
-      if (keyBlockInput.length == this.keyBlock.length) {
-        dataIndexMap = new int[0];
-      } else {
-        dataIndexMap = convertToArray(map);
-      }
-    } else {
-      this.keyBlock = keyBlockInput;
-      dataIndexMap = new int[0];
-    }
-
-    this.sortedBlock = new byte[keyBlock.length][];
-    System.arraycopy(keyBlock, 0, sortedBlock, 0, keyBlock.length);
-    if (isNoDictionary) {
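-      // no-dictionary values carry a 2-byte length prefix, so this
-      // comparator compares only the payload starting at offset 2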
-      Arrays.sort(sortedBlock, new Comparator<byte[]>() {
-        @Override
-        public int compare(byte[] col1, byte[] col2) {
-          return ByteUtil.UnsafeComparer.INSTANCE
-              .compareTo(col1, 2, col1.length - 2, col2, 2, col2.length - 2);
-        }
-      });
-    } else {
-      Arrays.sort(sortedBlock, new Comparator<byte[]>() {
-        @Override
-        public int compare(byte[] col1, byte[] col2) {
-          return ByteUtil.UnsafeComparer.INSTANCE.compareTo(col1, col2);
-        }
-      });
-    }
-
-  }
-
-  private int[] convertToArray(List<Integer> list) {
-    int[] shortArray = new int[list.size()];
-    for(int i = 0; i < shortArray.length; i++) {
-      shortArray[i] = list.get(i);
-    }
-    return shortArray;
-  }
-
-  private byte[][] convertToKeyArray(List<byte[]> list) {
-    byte[][] shortArray = new byte[list.size()][];
-    for (int i = 0; i < shortArray.length; i++) {
-      shortArray[i] = list.get(i);
-      totalSize += shortArray[i].length;
-    }
-    return shortArray;
-  }
-
-  @Override
-  public int[] getDataIndexMap() {
-    return dataIndexMap;
-  }
-
-  @Override
-  public int getTotalSize() {
-    return totalSize;
-  }
-
-  @Override
-  public boolean isAlreadySorted() {
-    return true;
-  }
-
-  /**
-   * not used for this storage
-   * @return empty array
-   */
-  @Override
-  public int[] getDataAfterComp() {
-    return new int[0];
-  }
-
-  /**
-   * not used for this storage
-   * @return empty array
-   */
-  @Override
-  public int[] getIndexMap() {
-    return new int[0];
-  }
-
-  /**
-   * @return the keyBlock
-   */
-  public byte[][] getKeyBlock() {
-    return keyBlock;
-  }
-
-  @Override public byte[] getMin() {
-    return sortedBlock[0];
-  }
-
-  @Override public byte[] getMax() {
-    return sortedBlock[sortedBlock.length - 1];
-  }
-
-}
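
The RLE branch of the constructor keeps one copy of each run of equal
adjacent keys and records (start, count) pairs in dataIndexMap. A
self-contained rerun of the same folding, using Strings instead of byte
arrays for readability (demo class name is made up):

  import java.util.ArrayList;
  import java.util.List;

  public final class KeyRleDemo {
    public static void main(String[] args) {
      String[] keys = {"A", "A", "B", "B", "B", "C"};
      List<String> unique = new ArrayList<>();
      List<Integer> map = new ArrayList<>();   // (start, count) pairs
      String prv = keys[0];
      unique.add(prv);
      int counter = 1;
      int start = 0;
      for (int i = 1; i < keys.length; i++) {
        if (!prv.equals(keys[i])) {            // run ended: record it
          prv = keys[i];
          unique.add(prv);
          map.add(start);
          map.add(counter);
          start += counter;
          counter = 1;
        } else {
          counter++;
        }
      }
      map.add(start);                          // flush the last run
      map.add(counter);
      System.out.println(unique);              // [A, B, C]
      System.out.println(map);                 // [0, 2, 2, 3, 5, 1]
    }
  }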

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnGroupModel.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnGroupModel.java b/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnGroupModel.java
deleted file mode 100644
index 26b2519..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnGroupModel.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.datastorage.store.columnar;
-
-public class ColumnGroupModel {
-
-  /**
-   * number of columns in each columnar block
-   */
-  private int[] columnSplit;
-
-  /**
-   * total number of column stores
-   */
-  private int noOfColumnsStore;
-
-  /**
-   * whether given index is columnar or not
-   * true: columnar
-   * false: row block
-   */
-  private boolean[] columnarStore;
-
-  /**
-   * column groups
-   * e.g
-   * {{0,1,2},3,4,{5,6}}
-   */
-  private int[][] columnGroups;
-
-  /**
-   * return columnSplit
-   *
-   * @return
-   */
-  public int[] getColumnSplit() {
-    return columnSplit;
-  }
-
-  /**
-   * set columnSplit
-   *
-   * @param split
-   */
-  public void setColumnSplit(int[] split) {
-    this.columnSplit = split;
-  }
-
-  /**
-   * @return no of columnar block
-   */
-  public int getNoOfColumnStore() {
-    return this.noOfColumnsStore;
-  }
-
-  /**
-   * set no of columnar block
-   *
-   * @param noOfColumnsStore
-   */
-  public void setNoOfColumnStore(int noOfColumnsStore) {
-    this.noOfColumnsStore = noOfColumnsStore;
-  }
-
-  /**
-   * it's an identifier for row block or single column block
-   *
-   * @param columnarStore
-   */
-  public void setColumnarStore(boolean[] columnarStore) {
-    this.columnarStore = columnarStore;
-  }
-
-  /**
-   * set column groups
-   *
-   * @param columnGroups
-   */
-  public void setColumnGroup(int[][] columnGroups) {
-    this.columnGroups = columnGroups;
-  }
-
-  /**
-   * check if given column group is columnar
-   *
-   * @param colGroup
-   * @return true if given block is columnar
-   */
-  public boolean isColumnar(int colGroup) {
-    return columnarStore[colGroup];
-  }
-
-  /**
-   * @return columngroups
-   */
-  public int[][] getColumnGroup() {
-    return this.columnGroups;
-  }
-
-}
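
For the grouping {{0,1,2},3,4,{5,6}} from the Javadoc, the model would
describe four column stores, the first and last being multi-column row
blocks. The population below is illustrative only; the real values come from
the table schema:

  ColumnGroupModel model = new ColumnGroupModel();
  model.setColumnGroup(new int[][] {{0, 1, 2}, {3}, {4}, {5, 6}});
  model.setNoOfColumnStore(4);
  model.setColumnSplit(new int[] {3, 1, 1, 2});  // columns per store
  model.setColumnarStore(new boolean[] {false, true, true, false});
  // model.isColumnar(0) == false: store 0 is a row block (column group)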

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnWithIntIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnWithIntIndex.java b/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnWithIntIndex.java
deleted file mode 100644
index 958eb84..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnWithIntIndex.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.columnar;
-
-import java.util.Arrays;
-
-import org.carbondata.core.util.ByteUtil;
-
-public class ColumnWithIntIndex implements Comparable<ColumnWithIntIndex> {
-  protected byte[] column;
-
-  private int index;
-
-  public ColumnWithIntIndex(byte[] column, int index) {
-    this.column = column;
-    this.index = index;
-  }
-
-  public ColumnWithIntIndex() {
-  }
-
-  /**
-   * @return the column
-   */
-  public byte[] getColumn() {
-    return column;
-  }
-
-  /**
-   * @param column the column to set
-   */
-  public void setColumn(byte[] column) {
-    this.column = column;
-  }
-
-  /**
-   * @return the index
-   */
-  public int getIndex() {
-    return index;
-  }
-
-  /**
-   * @param index the index to set
-   */
-  public void setIndex(int index) {
-    this.index = index;
-  }
-
-  @Override public int compareTo(ColumnWithIntIndex o) {
-    return ByteUtil.UnsafeComparer.INSTANCE.compareTo(column, o.column);
-  }
-
-  @Override public boolean equals(Object obj) {
-    if(obj == null || getClass() != obj.getClass()) {
-      return false;
-    }
-    ColumnWithIntIndex o = (ColumnWithIntIndex)obj;
-    return Arrays.equals(column, o.column) && index == o.index;
-  }
-
-  @Override public int hashCode() {
-    return Arrays.hashCode(column) + index;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnWithIntIndexForHighCard.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnWithIntIndexForHighCard.java b/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnWithIntIndexForHighCard.java
deleted file mode 100644
index d2dc5e1..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnWithIntIndexForHighCard.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.columnar;
-
-import java.util.Arrays;
-
-import org.carbondata.core.util.ByteUtil.UnsafeComparer;
-
-public class ColumnWithIntIndexForHighCard extends ColumnWithIntIndex
-    implements Comparable<ColumnWithIntIndex> {
-
-  public ColumnWithIntIndexForHighCard(byte[] column, int index) {
-    super(column, index);
-  }
-
-  @Override public int compareTo(ColumnWithIntIndex o) {
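-    // compare from offset 2 to skip the 2-byte length prefix that
-    // precedes each no-dictionary value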
-    return UnsafeComparer.INSTANCE
-        .compareTo(column, 2, column.length - 2, o.column, 2, o.column.length - 2);
-  }
-
-  @Override public boolean equals(Object obj) {
-    if(obj == null || getClass() != obj.getClass()) {
-      return false;
-    }
-    ColumnWithIntIndexForHighCard o = (ColumnWithIntIndexForHighCard)obj;
-    return Arrays.equals(column, o.column) && getIndex() == o.getIndex();
-  }
-
-  @Override public int hashCode() {
-    return Arrays.hashCode(column) + getIndex();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStore.java
deleted file mode 100644
index 10821c8..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStore.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.columnar;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-
-public interface ColumnarKeyStore {
-  /**
-   * This method will be used to get the actual mdkey arrays present in the
-   * carbon store; it will read and uncompress the keys
-   *
-   * @param fileHolder file holder to read the blocks
-   * @param noDictionaryValKeyIndexes direct key indexes for determining the
-   *        NO_DICTIONARY columns, in order to process the direct surrogate data
-   * @return mdkey holders
-   */
-  ColumnarKeyStoreDataHolder[] getUnCompressedKeyArray(FileHolder fileHolder, int[] blockIndex,
-      boolean[] needCompressedData, int[] noDictionaryValKeyIndexes);
-
-  /**
-   * This method will be used to get the actual mdkey array present in the
-   * carbon store; it will read and uncompress the key
-   *
-   * @param fileHolder
-   * @return mdkey
-   */
-  ColumnarKeyStoreDataHolder getUnCompressedKeyArray(FileHolder fileHolder, int blockIndex,
-      boolean needCompressedData, int[] noDictionaryValKeyIndexes);
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreDataHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreDataHolder.java b/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreDataHolder.java
deleted file mode 100644
index 55c4036..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreDataHolder.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.columnar;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-
-public class ColumnarKeyStoreDataHolder {
-  private byte[] keyblockData;
-  private List<byte[]> noDictionaryValBasedKeyBlockData;
-  private ColumnarKeyStoreMetadata columnarKeyStoreMetadata;
-
-  public ColumnarKeyStoreDataHolder(final byte[] keyblockData,
-      final ColumnarKeyStoreMetadata columnarKeyStoreMetadata) {
-    this.keyblockData = keyblockData;
-    this.columnarKeyStoreMetadata = columnarKeyStoreMetadata;
-  }
-
-  // Added constructor for holding noDictionaryValBasedKeyBlockData
-  public ColumnarKeyStoreDataHolder(final List<byte[]> noDictionaryValBasedKeyBlockData,
-      final ColumnarKeyStoreMetadata columnarKeyStoreMetadata) {
-    this.noDictionaryValBasedKeyBlockData = noDictionaryValBasedKeyBlockData;
-    this.columnarKeyStoreMetadata = columnarKeyStoreMetadata;
-  }
-
-  public byte[] getKeyBlockData() {
-    return keyblockData;
-  }
-
-  /**
-   * @return the columnarKeyStoreMetadata
-   */
-  public ColumnarKeyStoreMetadata getColumnarKeyStoreMetadata() {
-    return columnarKeyStoreMetadata;
-  }
-
-  public void unCompress() {
-    if (columnarKeyStoreMetadata.isUnCompressed()) {
-      return;
-    }
-    this.keyblockData = UnBlockIndexer
-        .uncompressData(keyblockData, columnarKeyStoreMetadata.getDataIndex(),
-            columnarKeyStoreMetadata.getEachRowSize());
-    columnarKeyStoreMetadata.setUnCompressed(true);
-  }
-
-  public int getSurrogateKey(int columnIndex) {
-    byte[] actual = new byte[4];
-    int startIndex;
-    if (null != columnarKeyStoreMetadata.getColumnReverseIndex()) {
-      startIndex =
-          columnarKeyStoreMetadata.getColumnReverseIndex()[columnIndex] * columnarKeyStoreMetadata
-              .getEachRowSize();
-    } else {
-      startIndex = columnIndex * columnarKeyStoreMetadata.getEachRowSize();
-    }
-    int destPos = 4 - columnarKeyStoreMetadata.getEachRowSize();
-    System.arraycopy(keyblockData, startIndex, actual, destPos,
-        columnarKeyStoreMetadata.getEachRowSize());
-    return ByteBuffer.wrap(actual).getInt();
-  }
-
-  /**
-   * get the byte[] for high cardinality column block
-   *
-   * @return List<byte[]>.
-   */
-  public List<byte[]> getNoDictionaryValBasedKeyBlockData() {
-    return noDictionaryValBasedKeyBlockData;
-  }
-
-  /**
-   * set the byte[] for high cardinality column block
-   *
-   * @param noDictionaryValBasedKeyBlockData
-   */
-  public void setNoDictionaryValBasedKeyBlockData(List<byte[]> noDictionaryValBasedKeyBlockData) {
-    this.noDictionaryValBasedKeyBlockData = noDictionaryValBasedKeyBlockData;
-  }
-}
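
For context, a minimal sketch (not part of this diff) of the byte widening done by getSurrogateKey above: a fixed-width key of eachRowSize bytes is left-padded into a 4-byte buffer (destPos = 4 - eachRowSize) and read back as a big-endian int. Names here are illustrative only.

import java.nio.ByteBuffer;

public class SurrogateKeySketch {
  static int surrogateAt(byte[] keyblockData, int rowIndex, int eachRowSize) {
    byte[] actual = new byte[4];
    int startIndex = rowIndex * eachRowSize;      // no reverse index in this sketch
    int destPos = 4 - eachRowSize;                // left-pad with zero bytes
    System.arraycopy(keyblockData, startIndex, actual, destPos, eachRowSize);
    return ByteBuffer.wrap(actual).getInt();      // ByteBuffer is big-endian by default
  }

  public static void main(String[] args) {
    byte[] block = {0, 1, 2, 0, 1, 3};            // two 3-byte keys
    System.out.println(surrogateAt(block, 0, 3)); // 258 (0x000102)
    System.out.println(surrogateAt(block, 1, 3)); // 259 (0x000103)
  }
}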

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreInfo.java b/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreInfo.java
deleted file mode 100644
index 0cf0d99..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreInfo.java
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.columnar;
-
-import org.carbondata.core.keygenerator.mdkey.NumberCompressor;
-
-public class ColumnarKeyStoreInfo {
-  private int numberOfKeys;
-
-  private int[] sizeOfEachBlock;
-
-  private int[] keyBlockLengths;
-
-  private long[] keyBlockOffsets;
-
-  private int[] keyBlockIndexLength;
-
-  private long[] keyBlockIndexOffsets;
-
-  private String filePath;
-
-  private boolean[] isSorted;
-
-  private int[] cardinality;
-
-  private NumberCompressor numberCompressor;
-
-  private NumberCompressor[] keyBlockUnCompressor;
-
-  private ColumnGroupModel hybridStoreModel;
-
-  /**
-   * dataIndexMap
-   */
-  private int[] dataIndexMapLength;
-
-  /**
-   * dataIndexMap
-   */
-  private long[] dataIndexMapOffsets;
-
-  /**
-   * aggKeyBlock
-   */
-  private boolean[] aggKeyBlock;
-
-  /**
-   * @return the numberOfKeys
-   */
-  public int getNumberOfKeys() {
-    return numberOfKeys;
-  }
-
-  /**
-   * @param numberOfKeys the numberOfKeys to set
-   */
-  public void setNumberOfKeys(int numberOfKeys) {
-    this.numberOfKeys = numberOfKeys;
-  }
-
-  /**
-   * @return the sizeOfEachBlock
-   */
-  public int[] getSizeOfEachBlock() {
-    return sizeOfEachBlock;
-  }
-
-  /**
-   * @param sizeOfEachBlock the sizeOfEachBlock to set
-   */
-  public void setSizeOfEachBlock(int[] sizeOfEachBlock) {
-    this.sizeOfEachBlock = sizeOfEachBlock;
-  }
-
-  /**
-   * @return the keyBlockLengths
-   */
-  public int[] getKeyBlockLengths() {
-    return keyBlockLengths;
-  }
-
-  /**
-   * @param keyBlockLengths the keyBlockLengths to set
-   */
-  public void setKeyBlockLengths(int[] keyBlockLengths) {
-    this.keyBlockLengths = keyBlockLengths;
-  }
-
-  /**
-   * @return the keyBlockOffsets
-   */
-  public long[] getKeyBlockOffsets() {
-    return keyBlockOffsets;
-  }
-
-  /**
-   * @param keyBlockOffsets the keyBlockOffsets to set
-   */
-  public void setKeyBlockOffsets(long[] keyBlockOffsets) {
-    this.keyBlockOffsets = keyBlockOffsets;
-  }
-
-  /**
-   * @return the keyBlockIndexLength
-   */
-  public int[] getKeyBlockIndexLength() {
-    return keyBlockIndexLength;
-  }
-
-  /**
-   * @param keyBlockIndexLength the keyBlockIndexLength to set
-   */
-  public void setKeyBlockIndexLength(int[] keyBlockIndexLength) {
-    this.keyBlockIndexLength = keyBlockIndexLength;
-  }
-
-  /**
-   * @return the keyBlockIndexOffsets
-   */
-  public long[] getKeyBlockIndexOffsets() {
-    return keyBlockIndexOffsets;
-  }
-
-  /**
-   * @param keyBlockIndexOffsets the keyBlockIndexOffsets to set
-   */
-  public void setKeyBlockIndexOffsets(long[] keyBlockIndexOffsets) {
-    this.keyBlockIndexOffsets = keyBlockIndexOffsets;
-  }
-
-  /**
-   * @return the filePath
-   */
-  public String getFilePath() {
-    return filePath;
-  }
-
-  /**
-   * @param filePath the filePath to set
-   */
-  public void setFilePath(String filePath) {
-    this.filePath = filePath;
-  }
-
-  /**
-   * @return the isSorted
-   */
-  public boolean[] getIsSorted() {
-    return isSorted;
-  }
-
-  /**
-   * @param isSorted the isSorted to set
-   */
-  public void setIsSorted(boolean[] isSorted) {
-    this.isSorted = isSorted;
-  }
-
-  /**
-   * @return the numberCompressor
-   */
-  public NumberCompressor getNumberCompressor() {
-    return numberCompressor;
-  }
-
-  /**
-   * @param numberCompressor the numberCompressor to set
-   */
-  public void setNumberCompressor(NumberCompressor numberCompressor) {
-    this.numberCompressor = numberCompressor;
-  }
-
-  /**
-   * @return the dataIndexMapLength
-   */
-  public int[] getDataIndexMapLength() {
-    return dataIndexMapLength;
-  }
-
-  /**
-   * @param dataIndexMapLength the dataIndexMapLength to set
-   */
-  public void setDataIndexMapLength(int[] dataIndexMapLength) {
-    this.dataIndexMapLength = dataIndexMapLength;
-  }
-
-  /**
-   * @return the dataIndexMapOffsets
-   */
-  public long[] getDataIndexMapOffsets() {
-    return dataIndexMapOffsets;
-  }
-
-  /**
-   * @param dataIndexMapOffsets the dataIndexMapOffsets to set
-   */
-  public void setDataIndexMapOffsets(long[] dataIndexMapOffsets) {
-    this.dataIndexMapOffsets = dataIndexMapOffsets;
-  }
-
-  /**
-   * @return the aggKeyBlock
-   */
-  public boolean[] getAggKeyBlock() {
-    return aggKeyBlock;
-  }
-
-  /**
-   * @param aggKeyBlock the aggKeyBlock to set
-   */
-  public void setAggKeyBlock(boolean[] aggKeyBlock) {
-    this.aggKeyBlock = aggKeyBlock;
-  }
-
-  /**
-   * @return the keyBlockUnCompressor
-   */
-  public NumberCompressor[] getKeyBlockUnCompressor() {
-    return keyBlockUnCompressor;
-  }
-
-  /**
-   * @param keyBlockUnCompressor the keyBlockUnCompressor to set
-   */
-  public void setKeyBlockUnCompressor(NumberCompressor[] keyBlockUnCompressor) {
-    this.keyBlockUnCompressor = keyBlockUnCompressor;
-  }
-
-  public int[] getCardinality() {
-    return cardinality;
-  }
-
-  public void setCardinality(int[] cardinality) {
-    this.cardinality = cardinality;
-  }
-
-  public ColumnGroupModel getHybridStoreModel() {
-    return hybridStoreModel;
-  }
-
-  public void setHybridStoreModel(ColumnGroupModel hybridStoreModel) {
-    this.hybridStoreModel = hybridStoreModel;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreMetadata.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreMetadata.java b/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreMetadata.java
deleted file mode 100644
index 81834cb..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreMetadata.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.columnar;
-
-import org.carbondata.core.keygenerator.KeyGenerator;
-import org.carbondata.core.keygenerator.factory.KeyGeneratorFactory;
-
-public class ColumnarKeyStoreMetadata {
-  private boolean isSorted;
-
-  private int[] columnIndex;
-
-  private int[] columnReverseIndex;
-
-  private int eachRowSize;
-
-  private int[] dataIndex;
-
-  private boolean isUnCompressed;
-
-  private KeyGenerator keyGenerator;
-
-  /**
-   * isNoDictionaryValColumn.
-   */
-  private boolean isNoDictionaryValColumn;
-  private boolean isRowStore;
-
-  public ColumnarKeyStoreMetadata(int eachRowSize) {
-    this.eachRowSize = eachRowSize;
-    keyGenerator = KeyGeneratorFactory.getKeyGenerator(new int[] { eachRowSize });
-  }
-
-  /**
-   * @return the isSorted
-   */
-  public boolean isSorted() {
-    return isSorted;
-  }
-
-  /**
-   * @param isSorted the isSorted to set
-   */
-  public void setSorted(boolean isSorted) {
-    this.isSorted = isSorted;
-  }
-
-  /**
-   * @return the columnIndex
-   */
-  public int[] getColumnIndex() {
-    return columnIndex;
-  }
-
-  /**
-   * @param columnIndex the columnIndex to set
-   */
-  public void setColumnIndex(int[] columnIndex) {
-    this.columnIndex = columnIndex;
-  }
-
-  /**
-   * @return the eachRowSize
-   */
-  public int getEachRowSize() {
-    return eachRowSize;
-  }
-
-  /**
-   * @return the dataIndex
-   */
-  public int[] getDataIndex() {
-    return dataIndex;
-  }
-
-  /**
-   * @param dataIndex the dataIndex to set
-   */
-  public void setDataIndex(int[] dataIndex) {
-    this.dataIndex = dataIndex;
-  }
-
-  /**
-   * @return the columnReverseIndex
-   */
-  public int[] getColumnReverseIndex() {
-    return columnReverseIndex;
-  }
-
-  /**
-   * @param columnReverseIndex the columnReverseIndex to set
-   */
-  public void setColumnReverseIndex(int[] columnReverseIndex) {
-    this.columnReverseIndex = columnReverseIndex;
-  }
-
-  public boolean isUnCompressed() {
-    return isUnCompressed;
-  }
-
-  public void setUnCompressed(boolean isUnCompressed) {
-    this.isUnCompressed = isUnCompressed;
-  }
-
-  public KeyGenerator getKeyGenerator() {
-    return keyGenerator;
-  }
-
-  public boolean isRowStore() {
-    return isRowStore;
-  }
-
-  public void setRowStore(boolean isRowStore) {
-    this.isRowStore = isRowStore;
-  }
-
-  /**
-   * @return
-   */
-  public boolean isNoDictionaryValColumn() {
-    return isNoDictionaryValColumn;
-
-  }
-
-  /**
-   * @param isNoDictionaryValColumn
-   */
-  public void setNoDictionaryValColumn(boolean isNoDictionaryValColumn) {
-    this.isNoDictionaryValColumn = isNoDictionaryValColumn;
-
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/columnar/IndexStorage.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/IndexStorage.java b/core/src/main/java/org/carbondata/core/datastorage/store/columnar/IndexStorage.java
deleted file mode 100644
index 5e1b3f8..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/IndexStorage.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.columnar;
-
-public interface IndexStorage<T> {
-  boolean isAlreadySorted();
-
-  T getDataAfterComp();
-
-  T getIndexMap();
-
-  byte[][] getKeyBlock();
-
-  T getDataIndexMap();
-
-  int getTotalSize();
-
-  /**
-   * @return min value of block
-   */
-  byte[] getMin();
-
-  /**
-   * @return max value of block
-   */
-  byte[] getMax();
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/columnar/UnBlockIndexer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/UnBlockIndexer.java b/core/src/main/java/org/carbondata/core/datastorage/store/columnar/UnBlockIndexer.java
deleted file mode 100644
index a20320b..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/columnar/UnBlockIndexer.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.columnar;
-
-import java.util.Arrays;
-
-public final class UnBlockIndexer {
-
-  private UnBlockIndexer() {
-
-  }
-
-  public static int[] uncompressIndex(int[] indexData, int[] indexMap) {
-    int actualSize = indexData.length;
-    for (int i = 0; i < indexMap.length; i++) {
-      actualSize += indexData[indexMap[i] + 1] - indexData[indexMap[i]] - 1;
-    }
-    int[] indexes = new int[actualSize];
-    int k = 0;
-    for (int i = 0; i < indexData.length; i++) {
-      int index = Arrays.binarySearch(indexMap, i);
-      if (index > -1) {
-        for (int j = indexData[indexMap[index]]; j <= indexData[indexMap[index] + 1]; j++) {
-          indexes[k] = j;
-          k++;
-        }
-        i++;
-      } else {
-        indexes[k] = indexData[i];
-        k++;
-      }
-    }
-    return indexes;
-  }
-
-  public static byte[] uncompressData(byte[] data, int[] index, int keyLen) {
-    if (index.length < 1) {
-      return data;
-    }
-    int numberOfCopy = 0;
-    int actualSize = 0;
-    int srcPos = 0;
-    int destPos = 0;
-    for (int i = 1; i < index.length; i += 2) {
-      actualSize += index[i];
-    }
-    byte[] uncompressedData = new byte[actualSize * keyLen];
-    int picIndex = 0;
-    for (int i = 0; i < data.length; i += keyLen) {
-      numberOfCopy = index[picIndex * 2 + 1];
-      picIndex++;
-      for (int j = 0; j < numberOfCopy; j++) {
-        System.arraycopy(data, srcPos, uncompressedData, destPos, keyLen);
-        destPos += keyLen;
-      }
-      srcPos += keyLen;
-    }
-    return uncompressedData;
-  }
-
-}
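
For context, a minimal usage sketch (not part of this diff) of the run-length layout uncompressData expects: data holds one copy of each distinct key (keyLen bytes apiece), and index holds (runStart, runLength) pairs, of which only the odd slots (the run lengths) drive the expansion.

public class UnBlockIndexerDemo {
  public static void main(String[] args) {
    byte[] data = {1, 2};          // one copy of each distinct 1-byte key
    int[] index = {0, 3, 3, 2};    // (start, length) pairs: key 1 runs 3x, key 2 runs 2x
    byte[] expanded = UnBlockIndexer.uncompressData(data, index, 1);
    System.out.println(java.util.Arrays.toString(expanded));  // [1, 1, 1, 2, 2]
  }
}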

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/Compressor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/Compressor.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/Compressor.java
deleted file mode 100644
index e1db7d0..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/Compressor.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression;
-
-public interface Compressor<T> {
-
-  byte[] compress(T input);
-
-  T unCompress(byte[] input);
-
-}
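
For context, the contract above admits any codec. A java.util.zip-backed implementation is sketched below purely as an illustration (it is not part of the codebase; the Snappy-backed implementations follow in the next file):

import java.io.ByteArrayOutputStream;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.Inflater;

public class DeflateByteCompressor implements Compressor<byte[]> {
  @Override public byte[] compress(byte[] input) {
    Deflater deflater = new Deflater();
    deflater.setInput(input);
    deflater.finish();
    ByteArrayOutputStream out = new ByteArrayOutputStream(input.length);
    byte[] buf = new byte[4096];
    while (!deflater.finished()) {
      out.write(buf, 0, deflater.deflate(buf));   // drain until fully deflated
    }
    deflater.end();
    return out.toByteArray();
  }

  @Override public byte[] unCompress(byte[] input) {
    try {
      Inflater inflater = new Inflater();
      inflater.setInput(input);
      ByteArrayOutputStream out = new ByteArrayOutputStream(input.length * 2);
      byte[] buf = new byte[4096];
      while (!inflater.finished()) {
        out.write(buf, 0, inflater.inflate(buf)); // drain until fully inflated
      }
      inflater.end();
      return out.toByteArray();
    } catch (DataFormatException e) {
      throw new RuntimeException("corrupt deflate stream", e);
    }
  }
}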

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/MeasureMetaDataModel.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/MeasureMetaDataModel.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/MeasureMetaDataModel.java
deleted file mode 100644
index bc169b1..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/MeasureMetaDataModel.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression;
-
-public class MeasureMetaDataModel {
-  /**
-   * maxValue
-   */
-  private Object[] maxValue;
-
-  /**
-   * minValue
-   */
-  private Object[] minValue;
-
-  /**
-   * decimal
-   */
-  private int[] decimal;
-
-  /**
-   * measureCount
-   */
-  private int measureCount;
-
-  /**
-   * uniqueValue
-   */
-  private Object[] uniqueValue;
-
-  /**
-   * type
-   */
-  private char[] type;
-
-  /**
-   * dataTypeSelected
-   */
-  private byte[] dataTypeSelected;
-
-  private Object[] minValueFactForAgg;
-
-  public MeasureMetaDataModel() {
-
-  }
-
-  /**
-   * MeasureMetaDataModel Constructor
-   *
-   * @param minValue
-   * @param maxValue
-   * @param decimal
-   * @param measureCount
-   * @param uniqueValue
-   * @param type
-   */
-  public MeasureMetaDataModel(Object[] minValue, Object[] maxValue, int[] decimal, int measureCount,
-      Object[] uniqueValue, char[] type, byte[] dataTypeSelected) {
-    this.minValue = minValue;
-    this.maxValue = maxValue;
-    this.decimal = decimal;
-    this.measureCount = measureCount;
-    this.uniqueValue = uniqueValue;
-    this.type = type;
-    this.dataTypeSelected = dataTypeSelected;
-  }
-
-  /**
-   * get Max value
-   *
-   * @return
-   */
-  public Object[] getMaxValue() {
-    return maxValue;
-  }
-
-  /**
-   * set max value
-   *
-   * @param maxValue
-   */
-  public void setMaxValue(Object[] maxValue) {
-    this.maxValue = maxValue;
-  }
-
-  /**
-   * getMinValue
-   *
-   * @return
-   */
-  public Object[] getMinValue() {
-    return minValue;
-  }
-
-  /**
-   * setMinValue
-   *
-   * @param minValue
-   */
-  public void setMinValue(Object[] minValue) {
-    this.minValue = minValue;
-  }
-
-  /**
-   * getDecimal
-   *
-   * @return
-   */
-  public int[] getDecimal() {
-    return decimal;
-  }
-
-  /**
-   * setDecimal
-   *
-   * @param decimal
-   */
-  public void setDecimal(int[] decimal) {
-    this.decimal = decimal;
-  }
-
-  /**
-   * getMeasureCount
-   *
-   * @return
-   */
-  public int getMeasureCount() {
-    return measureCount;
-  }
-
-  /**
-   * setMeasureCount
-   *
-   * @param measureCount
-   */
-  public void setMeasureCount(int measureCount) {
-    this.measureCount = measureCount;
-  }
-
-  /**
-   * getUniqueValue
-   *
-   * @return
-   */
-  public Object[] getUniqueValue() {
-    return uniqueValue;
-  }
-
-  /**
-   * setUniqueValue
-   *
-   * @param uniqueValue
-   */
-  public void setUniqueValue(Object[] uniqueValue) {
-    this.uniqueValue = uniqueValue;
-  }
-
-  /**
-   * @return the type
-   */
-  public char[] getType() {
-    return type;
-  }
-
-  /**
-   * @param type the type to set
-   */
-  public void setType(char[] type) {
-    this.type = type;
-  }
-
-  /**
-   * @return the dataTypeSelected
-   */
-  public byte[] getDataTypeSelected() {
-    return dataTypeSelected;
-  }
-
-  /**
-   * @param dataTypeSelected the dataTypeSelected to set
-   */
-  public void setDataTypeSelected(byte[] dataTypeSelected) {
-    this.dataTypeSelected = dataTypeSelected;
-  }
-
-  /**
-   * @return the minValueFactForAgg
-   */
-  public Object[] getMinValueFactForAgg() {
-    return minValueFactForAgg;
-  }
-
-  /**
-   * @param minValueFactForAgg the minValueFactForAgg to set
-   */
-  public void setMinValueFactForAgg(Object[] minValueFactForAgg) {
-    this.minValueFactForAgg = minValueFactForAgg;
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/SnappyCompression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/SnappyCompression.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/SnappyCompression.java
deleted file mode 100644
index fd9a99e..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/SnappyCompression.java
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression;
-
-import java.io.IOException;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-
-import org.xerial.snappy.Snappy;
-
-public class SnappyCompression {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(SnappyCompression.class.getName());
-
-  /**
-   * SnappyByteCompression.
-   */
-  public static enum SnappyByteCompression implements Compressor<byte[]> {
-    /**
-     *
-     */
-    INSTANCE;
-
-    /**
-     * wrapper method for compressing byte[] unCompInput.
-     */
-    public byte[] compress(byte[] unCompInput) {
-      try {
-        return Snappy.rawCompress(unCompInput, unCompInput.length);
-      } catch (IOException e) {
-        LOGGER.error(e, e.getMessage());
-        return null;
-      }
-    }
-
-    /**
-     * wrapper method for uncompressing byte[] compInput.
-     *
-     * @return byte[].
-     */
-    public byte[] unCompress(byte[] compInput) {
-      try {
-        return Snappy.uncompress(compInput);
-      } catch (IOException e) {
-        LOGGER.error(e, e.getMessage());
-      }
-      return compInput;
-    }
-  }
-
-  /**
-   * enum class for SnappyDoubleCompression.
-   */
-  public static enum SnappyDoubleCompression implements Compressor<double[]> {
-    /**
-     *
-     */
-    INSTANCE;
-
-    /**
-     * wrapper method for compressing double[] unCompInput.
-     */
-    public byte[] compress(double[] unCompInput) {
-      try {
-        return Snappy.compress(unCompInput);
-      } catch (IOException e) {
-        LOGGER.error(e, e.getMessage());
-        return null;
-      }
-    }
-
-    /**
-     * wrapper method for uncompressing byte[] compInput.
-     *
-     * @param compInput byte[].
-     * @return double[].
-     */
-    public double[] unCompress(byte[] compInput) {
-      try {
-        return Snappy.uncompressDoubleArray(compInput);
-      } catch (IOException e) {
-        LOGGER.error(e, e.getMessage());
-      }
-      return null;
-    }
-
-  }
-
-  /**
-   * enum class for SnappyShortCompression.
-   *
-   * @author S71955
-   */
-  public static enum SnappyShortCompression implements Compressor<short[]> {
-    /**
-     *
-     */
-    INSTANCE;
-
-    /**
-     * wrapper method for compress short[] unCompInput.
-     *
-     * @param unCompInput short[].
-     * @return byte[].
-     */
-    public byte[] compress(short[] unCompInput) {
-      try {
-        return Snappy.compress(unCompInput);
-      } catch (IOException e) {
-        LOGGER.error(e, e.getMessage());
-        return null;
-      }
-    }
-
-    /**
-     * wrapper method for uncompressShortArray.
-     *
-     * @param compInput byte[].
-     * @return short[].
-     */
-    public short[] unCompress(byte[] compInput) {
-      try {
-        return Snappy.uncompressShortArray(compInput);
-      } catch (IOException e) {
-        LOGGER.error(e, e.getMessage());
-      }
-      return null;
-    }
-  }
-
-  /**
-   * enum class for SnappyIntCompression.
-   */
-  public static enum SnappyIntCompression implements Compressor<int[]> {
-    /**
-     *
-     */
-    INSTANCE;
-
-    /**
-     * wrapper method for compress int[] unCompInput.
-     *
-     * @param unCompInput int[].
-     * @return byte[].
-     */
-    public byte[] compress(int[] unCompInput) {
-      try {
-        return Snappy.compress(unCompInput);
-      } catch (IOException e) {
-        LOGGER.error(e, e.getMessage());
-        return null;
-      }
-    }
-
-    /**
-     * wrapper method for uncompressIntArray.
-     *
-     * @param compInput byte[].
-     * @return int[].
-     */
-    public int[] unCompress(byte[] compInput) {
-      try {
-        return Snappy.uncompressIntArray(compInput);
-      } catch (IOException e) {
-        LOGGER.error(e, e.getMessage());
-      }
-      return null;
-    }
-  }
-
-  /**
-   * enum class for SnappyLongCompression.
-   */
-  public static enum SnappyLongCompression implements Compressor<long[]> {
-    /**
-     *
-     */
-    INSTANCE;
-
-    /**
-     * wrapper method for compress long[] unCompInput.
-     *
-     * @param unCompInput long[].
-     * @return byte[].
-     */
-    public byte[] compress(long[] unCompInput) {
-      try {
-        return Snappy.compress(unCompInput);
-      } catch (IOException e) {
-        LOGGER.error(e, e.getMessage());
-        return null;
-      }
-    }
-
-    /**
-     * wrapper method for uncompressLongArray.
-     *
-     * @param compInput byte[].
-     * @return long[].
-     */
-    public long[] unCompress(byte[] compInput) {
-      try {
-        return Snappy.uncompressLongArray(compInput);
-      } catch (IOException e) {
-        LOGGER.error(e, e.getMessage());
-      }
-      return null;
-    }
-  }
-
-  /**
-   * enum class for SnappyFloatCompression.
-   */
-  public static enum SnappyFloatCompression implements Compressor<float[]> {
-    /**
-     *
-     */
-    INSTANCE;
-
-    /**
-     * wrapper method for compress float[] unCompInput.
-     *
-     * @param unCompInput float[].
-     * @return byte[].
-     */
-    public byte[] compress(float[] unCompInput) {
-      try {
-        return Snappy.compress(unCompInput);
-      } catch (IOException e) {
-        LOGGER.error(e, e.getMessage());
-        return null;
-      }
-    }
-
-    /**
-     * wrapper method for uncompressFloatArray.
-     *
-     * @param compInput byte[].
-     * @return float[].
-     */
-    public float[] unCompress(byte[] compInput) {
-      try {
-        return Snappy.uncompressFloatArray(compInput);
-      } catch (IOException e) {
-        LOGGER.error(e, e.getMessage());
-      }
-      return null;
-    }
-  }
-
-}
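
For context, a minimal round trip through the enum singletons above (illustrative only). Note the asymmetric failure modes in the wrappers: compress returns null on IOException, while the byte[] variant of unCompress falls back to returning its input.

double[] original = {1.5, 2.5, 3.5};
byte[] packed = SnappyCompression.SnappyDoubleCompression.INSTANCE.compress(original);
double[] restored = SnappyCompression.SnappyDoubleCompression.INSTANCE.unCompress(packed);
// restored is element-for-element equal to original on success; on an
// IOException inside the wrapper, unCompress returns null instead.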

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/ValueCompressionModel.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/ValueCompressionModel.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/ValueCompressionModel.java
deleted file mode 100644
index 83cb001..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/ValueCompressionModel.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression;
-
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public class ValueCompressionModel {
-  /**
-   * COMPRESSION_TYPE[] variable.
-   */
-  private ValueCompressionUtil.COMPRESSION_TYPE[] compType;
-
-  /**
-   * DataType[]  variable.
-   */
-  private ValueCompressionUtil.DataType[] changedDataType;
-  /**
-   * DataType[]  variable.
-   */
-  private ValueCompressionUtil.DataType[] actualDataType;
-
-  /**
-   * maxValue
-   */
-  private Object[] maxValue;
-  /**
-   * minValue.
-   */
-  private Object[] minValue;
-
-  private Object[] minValueFactForAgg;
-
-  /**
-   * uniqueValue
-   */
-  private Object[] uniqueValue;
-  /**
-   * decimal.
-   */
-  private int[] decimal;
-
-  /**
-   * aggType
-   */
-  private char[] type;
-
-  /**
-   * dataTypeSelected
-   */
-  private byte[] dataTypeSelected;
-  /**
-   * unCompressValues.
-   */
-  private ValueCompressonHolder.UnCompressValue[] unCompressValues;
-
-  /**
-   * @return the compType
-   */
-  public ValueCompressionUtil.COMPRESSION_TYPE[] getCompType() {
-    return compType;
-  }
-
-  /**
-   * @param compType the compType to set
-   */
-  public void setCompType(ValueCompressionUtil.COMPRESSION_TYPE[] compType) {
-    this.compType = compType;
-  }
-
-  /**
-   * @return the changedDataType
-   */
-  public ValueCompressionUtil.DataType[] getChangedDataType() {
-    return changedDataType;
-  }
-
-  /**
-   * @param changedDataType the changedDataType to set
-   */
-  public void setChangedDataType(ValueCompressionUtil.DataType[] changedDataType) {
-    this.changedDataType = changedDataType;
-  }
-
-  /**
-   * @return the actualDataType
-   */
-  public ValueCompressionUtil.DataType[] getActualDataType() {
-    return actualDataType;
-  }
-
-  /**
-   * @param actualDataType
-   */
-  public void setActualDataType(ValueCompressionUtil.DataType[] actualDataType) {
-    this.actualDataType = actualDataType;
-  }
-
-  /**
-   * @return the maxValue
-   */
-  public Object[] getMaxValue() {
-    return maxValue;
-  }
-
-  /**
-   * @param maxValue the maxValue to set
-   */
-  public void setMaxValue(Object[] maxValue) {
-    this.maxValue = maxValue;
-  }
-
-  /**
-   * @return the decimal
-   */
-  public int[] getDecimal() {
-    return decimal;
-  }
-
-  /**
-   * @param decimal the decimal to set
-   */
-  public void setDecimal(int[] decimal) {
-    this.decimal = decimal;
-  }
-
-  /**
-   * getUnCompressValues().
-   *
-   * @return the unCompressValues
-   */
-  public ValueCompressonHolder.UnCompressValue[] getUnCompressValues() {
-    return unCompressValues;
-  }
-
-  /**
-   * @param unCompressValues the unCompressValues to set
-   */
-  public void setUnCompressValues(ValueCompressonHolder.UnCompressValue[] unCompressValues) {
-    this.unCompressValues = unCompressValues;
-  }
-
-  /**
-   * getMinValue
-   *
-   * @return
-   */
-  public Object[] getMinValue() {
-    return minValue;
-  }
-
-  /**
-   * setMinValue.
-   *
-   * @param minValue
-   */
-  public void setMinValue(Object[] minValue) {
-    this.minValue = minValue;
-  }
-
-  /**
-   * @return the aggType
-   */
-  public char[] getType() {
-    return type;
-  }
-
-  /**
-   * @param type the type to set
-   */
-  public void setType(char[] type) {
-    this.type = type;
-  }
-
-  /**
-   * @return the dataTypeSelected
-   */
-  public byte[] getDataTypeSelected() {
-    return dataTypeSelected;
-  }
-
-  /**
-   * @param dataTypeSelected the dataTypeSelected to set
-   */
-  public void setDataTypeSelected(byte[] dataTypeSelected) {
-    this.dataTypeSelected = dataTypeSelected;
-  }
-
-  /**
-   * getUniqueValue
-   *
-   * @return
-   */
-  public Object[] getUniqueValue() {
-    return uniqueValue;
-  }
-
-  /**
-   * setUniqueValue
-   *
-   * @param uniqueValue
-   */
-  public void setUniqueValue(Object[] uniqueValue) {
-    this.uniqueValue = uniqueValue;
-  }
-
-  /**
-   * @return the minValueFactForAgg
-   */
-  public Object[] getMinValueFactForAgg() {
-    return minValueFactForAgg;
-  }
-
-  /**
-   * @param minValueFactForAgg the minValueFactForAgg to set
-   */
-  public void setMinValueFactForAgg(Object[] minValueFactForAgg) {
-    this.minValueFactForAgg = minValueFactForAgg;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/ValueCompressonHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/ValueCompressonHolder.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/ValueCompressonHolder.java
deleted file mode 100644
index 89bf334..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/ValueCompressonHolder.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression;
-
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-/**
- * ValueCompressonHolder class.
- */
-public final class ValueCompressonHolder {
-
-  /**
-   * byteCompressor.
-   */
-  private static Compressor<byte[]> byteCompressor =
-      SnappyCompression.SnappyByteCompression.INSTANCE;
-
-  /**
-   * shortCompressor.
-   */
-  private static Compressor<short[]> shortCompressor =
-      SnappyCompression.SnappyShortCompression.INSTANCE;
-
-  /**
-   * intCompressor.
-   */
-  private static Compressor<int[]> intCompressor = SnappyCompression.SnappyIntCompression.INSTANCE;
-
-  /**
-   * longCompressor.
-   */
-  private static Compressor<long[]> longCompressor =
-      SnappyCompression.SnappyLongCompression.INSTANCE;
-
-  /**
-   * floatCompressor
-   */
-  private static Compressor<float[]> floatCompressor =
-      SnappyCompression.SnappyFloatCompression.INSTANCE;
-  /**
-   * doubleCompressor.
-   */
-  private static Compressor<double[]> doubleCompressor =
-      SnappyCompression.SnappyDoubleCompression.INSTANCE;
-
-  private ValueCompressonHolder() {
-
-  }
-
-  /**
-   * @param dataType
-   * @param value
-   * @param data
-   */
-  public static void unCompress(DataType dataType, UnCompressValue value, byte[] data) {
-    switch (dataType) {
-      case DATA_BYTE:
-
-        value.setValue(byteCompressor.unCompress(data));
-        break;
-
-      case DATA_SHORT:
-
-        value.setValue(shortCompressor.unCompress(data));
-        break;
-
-      case DATA_INT:
-
-        value.setValue(intCompressor.unCompress(data));
-        break;
-
-      case DATA_LONG:
-      case DATA_BIGINT:
-
-        value.setValue(longCompressor.unCompress(data));
-        break;
-
-      case DATA_FLOAT:
-
-        value.setValue(floatCompressor.unCompress(data));
-        break;
-      default:
-
-        value.setValue(doubleCompressor.unCompress(data));
-        break;
-
-    }
-  }
-
-  /**
-   * interface for UnCompressValue<T>.
-   *
-   * @param <T>
-   */
-
-  public interface UnCompressValue<T> extends Cloneable {
-    //        Object getValue(int index, int decimal, double maxValue);
-
-    void setValue(T value);
-
-    void setValueInBytes(byte[] value);
-
-    UnCompressValue<T> getNew();
-
-    UnCompressValue compress();
-
-    UnCompressValue uncompress(DataType dataType);
-
-    byte[] getBackArrayData();
-
-    UnCompressValue getCompressorObject();
-
-    CarbonReadDataHolder getValues(int decimal, Object maxValue);
-
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressByteArray.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressByteArray.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressByteArray.java
deleted file mode 100644
index 34de084..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressByteArray.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.math.BigDecimal;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.DataTypeUtil;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public class UnCompressByteArray implements ValueCompressonHolder.UnCompressValue<byte[]> {
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressMaxMinByte.class.getName());
-  /**
-   * byteCompressor.
-   */
-  private static Compressor<byte[]> byteCompressor =
-      SnappyCompression.SnappyByteCompression.INSTANCE;
-  private ByteArrayType arrayType;
-  /**
-   * value.
-   */
-  private byte[] value;
-
-  public UnCompressByteArray(ByteArrayType type) {
-    if (type == ByteArrayType.BYTE_ARRAY) {
-      arrayType = ByteArrayType.BYTE_ARRAY;
-    } else {
-      arrayType = ByteArrayType.BIG_DECIMAL;
-    }
-
-  }
-
-  @Override public void setValue(byte[] value) {
-    this.value = value;
-
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    this.value = value;
-
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue<byte[]> getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressByteArray byte1 = new UnCompressByteArray(arrayType);
-    byte1.setValue(byteCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override
-  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
-    ValueCompressonHolder.UnCompressValue byte1 = new UnCompressByteArray(arrayType);
-    byte1.setValue(byteCompressor.unCompress(value));
-    return byte1;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return this.value;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressByteArray(arrayType);
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    List<byte[]> valsList = new ArrayList<byte[]>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    buffer.rewind();
-    int length = 0;
-    byte[] actualValue = null;
-    //CHECKSTYLE:OFF    Approval No:Approval-367
-    while (buffer.hasRemaining()) {//CHECKSTYLE:ON
-      length = buffer.getInt();
-      actualValue = new byte[length];
-      buffer.get(actualValue);
-      valsList.add(actualValue);
-
-    }
-    CarbonReadDataHolder holder = new CarbonReadDataHolder();
-    byte[][] value = new byte[valsList.size()][];
-    valsList.toArray(value);
-    if (arrayType == ByteArrayType.BIG_DECIMAL) {
-      BigDecimal[] bigDecimalValues = new BigDecimal[value.length];
-      for (int i = 0; i < value.length; i++) {
-        bigDecimalValues[i] = DataTypeUtil.byteToBigDecimal(value[i]);
-      }
-      holder.setReadableBigDecimalValues(bigDecimalValues);
-      return holder;
-    }
-    holder.setReadableByteValues(value);
-    return holder;
-  }
-
-  public static enum ByteArrayType {
-    BYTE_ARRAY,
-    BIG_DECIMAL
-  }
-
-}
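
For context, a minimal sketch (not part of this diff) of the length-prefixed layout that getValues above walks: each value is stored as a 4-byte big-endian length followed by that many payload bytes. The pack/unpack names are hypothetical.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class LengthPrefixedDemo {
  static byte[] pack(List<byte[]> values) {
    int total = 0;
    for (byte[] v : values) {
      total += 4 + v.length;
    }
    ByteBuffer buf = ByteBuffer.allocate(total);
    for (byte[] v : values) {
      buf.putInt(v.length).put(v);          // 4-byte length, then payload
    }
    return buf.array();
  }

  static List<byte[]> unpack(byte[] packed) {
    ByteBuffer buf = ByteBuffer.wrap(packed);
    List<byte[]> out = new ArrayList<byte[]>();
    while (buf.hasRemaining()) {            // same loop shape as getValues
      byte[] v = new byte[buf.getInt()];
      buf.get(v);
      out.add(v);
    }
    return out;
  }

  public static void main(String[] args) {
    byte[] packed = pack(Arrays.asList("ab".getBytes(), "xyz".getBytes()));
    for (byte[] v : unpack(packed)) {
      System.out.println(new String(v));    // prints ab, then xyz
    }
  }
}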

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressDefaultLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressDefaultLong.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressDefaultLong.java
deleted file mode 100644
index 6404027..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressDefaultLong.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.datastorage.store.compression.type;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-
-public class UnCompressDefaultLong extends UnCompressNoneLong {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressDefaultLong.class.getName());
-
-  public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException clnNotSupportedExc) {
-      LOGGER.error(clnNotSupportedExc,
-          clnNotSupportedExc.getMessage());
-    }
-    return null;
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    long[] vals = new long[value.length];
-    for (int i = 0; i < vals.length; i++) {
-      vals[i] = value[i];
-    }
-    dataHolder.setReadableLongValues(vals);
-    return dataHolder;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByte.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByte.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByte.java
deleted file mode 100644
index 9f2db9f..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByte.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressMaxMinByte implements UnCompressValue<byte[]> {
-
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressMaxMinByte.class.getName());
-  /**
-   * byteCompressor.
-   */
-  private static Compressor<byte[]> byteCompressor =
-      SnappyCompression.SnappyByteCompression.INSTANCE;
-  /**
-   * value.
-   */
-  protected byte[] value;
-
-  //TODO SIMIAN
-
-  @Override public void setValue(byte[] value) {
-    this.value = value;
-
-  }
-
-  @Override public UnCompressValue getNew() {
-    try {
-      return (UnCompressValue) clone();
-    } catch (CloneNotSupportedException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return null;
-  }
-
-  @Override public UnCompressValue compress() {
-
-    UnCompressMaxMinByte byte1 = new UnCompressMaxMinByte();
-    byte1.setValue(byteCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public UnCompressValue uncompress(DataType dataType) {
-    UnCompressValue byte1 = ValueCompressionUtil.unCompressMaxMin(dataType, dataType);
-    ValueCompressonHolder.unCompress(dataType, byte1, value);
-    return byte1;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return value;
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    this.value = value;
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public UnCompressValue getCompressorObject() {
-    return new UnCompressMaxMinByte();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double maxValue = (double) maxValueObject;
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxValue;
-      } else {
-        vals[i] = maxValue - value[i];
-      }
-    }
-    dataHolder.setReadableDoubleValues(vals);
-    return dataHolder;
-  }
-}
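
For context, a minimal sketch (not part of this diff) of the max-minus-delta decoding in getValues above: each stored byte records the distance below the block's max value. Names are illustrative only.

public class MaxMinDecodeDemo {
  static double[] decode(byte[] deltas, double maxValue) {
    double[] vals = new double[deltas.length];
    for (int i = 0; i < deltas.length; i++) {
      // mirrors getValues: 0 maps straight to the max, anything else is max - delta
      vals[i] = (deltas[i] == 0) ? maxValue : maxValue - deltas[i];
    }
    return vals;
  }

  public static void main(String[] args) {
    double[] decoded = decode(new byte[] {0, 1, 25}, 100.0);
    System.out.println(java.util.Arrays.toString(decoded));  // [100.0, 99.0, 75.0]
  }
}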

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByteForLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByteForLong.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByteForLong.java
deleted file mode 100644
index c97f8be..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByteForLong.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.datastorage.store.compression.type;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public class UnCompressMaxMinByteForLong extends UnCompressMaxMinByte {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressMaxMinByteForLong.class.getName());
-  private static Compressor<byte[]> byteCompressor =
-      SnappyCompression.SnappyByteCompression.INSTANCE;
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException e) {
-      LOGGER.error(e, e.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-
-    UnCompressMaxMinByteForLong byte1 = new UnCompressMaxMinByteForLong();
-    byte1.setValue(byteCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override
-  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
-    ValueCompressonHolder.UnCompressValue byte1 =
-        ValueCompressionUtil.unCompressMaxMin(dataType, dataType);
-    ValueCompressonHolder.unCompress(dataType, byte1, value);
-    return byte1;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressMaxMinByteForLong();
-  }
-
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    long maxValue = (long) maxValueObject;
-    long[] vals = new long[value.length];
-    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxValue;
-      } else {
-        vals[i] = maxValue - value[i];
-      }
-    }
-    dataHolder.setReadableLongValues(vals);
-    return dataHolder;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefault.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefault.java b/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefault.java
deleted file mode 100644
index 5713541..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefault.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.compression.type;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-import org.carbondata.core.util.ValueCompressionUtil.DataType;
-
-public class UnCompressMaxMinDefault implements ValueCompressonHolder.UnCompressValue<double[]> {
-
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(UnCompressMaxMinDefault.class.getName());
-
-  /**
-   * doubleCompressor.
-   */
-  private static Compressor<double[]> doubleCompressor =
-      SnappyCompression.SnappyDoubleCompression.INSTANCE;
-  /**
-   * value.
-   */
-  private double[] value;
-
-  @Override public void setValue(double[] value) {
-    this.value = value;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue getNew() {
-    try {
-      return (ValueCompressonHolder.UnCompressValue) clone();
-    } catch (CloneNotSupportedException ex5) {
-      LOGGER.error(ex5, ex5.getMessage());
-    }
-    return null;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue compress() {
-    UnCompressMaxMinByte byte1 = new UnCompressMaxMinByte();
-    byte1.setValue(doubleCompressor.compress(value));
-    return byte1;
-  }
-
-  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
-    return null;
-  }
-
-  @Override public byte[] getBackArrayData() {
-    return ValueCompressionUtil.convertToBytes(value);
-  }
-
-  @Override public void setValueInBytes(byte[] value) {
-    ByteBuffer buffer = ByteBuffer.wrap(value);
-    this.value = ValueCompressionUtil.convertToDoubleArray(buffer, value.length);
-  }
-
-  /**
-   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
-   */
-  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
-    return new UnCompressMaxMinByte();
-  }
-
-  //TODO SIMIAN
-  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
-    double maxValue = (double) maxValueObject;
-    double[] vals = new double[value.length];
-    CarbonReadDataHolder dataHolderInfoObj = new CarbonReadDataHolder();
-    for (int i = 0; i < vals.length; i++) {
-      if (value[i] == 0) {
-        vals[i] = maxValue;
-      } else {
-        vals[i] = maxValue - value[i];
-      }
-
-    }
-    dataHolderInfoObj.setReadableDoubleValues(vals);
-    return dataHolderInfoObj;
-  }
-
-}


[24/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/AbstractDictionaryCache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/AbstractDictionaryCache.java b/core/src/main/java/org/carbondata/core/cache/dictionary/AbstractDictionaryCache.java
deleted file mode 100644
index 390bcc8..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/AbstractDictionaryCache.java
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.carbondata.common.factory.CarbonCommonFactory;
-import org.carbondata.core.cache.Cache;
-import org.carbondata.core.cache.CacheType;
-import org.carbondata.core.cache.CarbonLRUCache;
-import org.carbondata.core.carbon.path.CarbonTablePath;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.filesystem.CarbonFile;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-import org.carbondata.core.reader.CarbonDictionaryColumnMetaChunk;
-import org.carbondata.core.reader.CarbonDictionaryMetadataReader;
-import org.carbondata.core.service.DictionaryService;
-import org.carbondata.core.service.PathService;
-import org.carbondata.core.util.CarbonProperties;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.core.util.CarbonUtilException;
-
-/**
- * Abstract class which implements methods common to reverse and forward dictionary cache
- */
-public abstract class AbstractDictionaryCache<K extends DictionaryColumnUniqueIdentifier,
-    V extends Dictionary>
-    implements Cache<DictionaryColumnUniqueIdentifier, Dictionary> {
-
-  /**
-   * thread pool size to be used for dictionary data reading
-   */
-  protected int thread_pool_size;
-
-  /**
-   * LRU cache variable
-   */
-  protected CarbonLRUCache carbonLRUCache;
-
-  /**
-   * c store path
-   */
-  protected String carbonStorePath;
-
-  /**
-   * @param carbonStorePath
-   * @param carbonLRUCache
-   */
-  public AbstractDictionaryCache(String carbonStorePath, CarbonLRUCache carbonLRUCache) {
-    this.carbonStorePath = carbonStorePath;
-    this.carbonLRUCache = carbonLRUCache;
-    initThreadPoolSize();
-  }
-
-  /**
-   * This method will initialize the thread pool size, i.e. the maximum number of
-   * threads to be used for dictionary data reading in a job
-   */
-  private void initThreadPoolSize() {
-    try {
-      thread_pool_size = Integer.parseInt(CarbonProperties.getInstance()
-          .getProperty(CarbonCommonConstants.NUM_CORES_LOADING,
-              CarbonCommonConstants.NUM_CORES_DEFAULT_VAL));
-    } catch (NumberFormatException e) {
-      thread_pool_size = Integer.parseInt(CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
-    }
-  }
-
-  /**
-   * This method will check if the dictionary and its metadata file exist for a given column
-   *
-   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
-   *                                         tableName and columnIdentifier
-   * @return
-   */
-  protected boolean isFileExistsForGivenColumn(
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
-    PathService pathService = CarbonCommonFactory.getPathService();
-    CarbonTablePath carbonTablePath = pathService
-        .getCarbonTablePath(dictionaryColumnUniqueIdentifier.getColumnIdentifier(), carbonStorePath,
-            dictionaryColumnUniqueIdentifier.getCarbonTableIdentifier());
-
-    String dictionaryFilePath =
-        carbonTablePath.getDictionaryFilePath(dictionaryColumnUniqueIdentifier
-            .getColumnIdentifier().getColumnId());
-    String dictionaryMetadataFilePath =
-        carbonTablePath.getDictionaryMetaFilePath(dictionaryColumnUniqueIdentifier
-            .getColumnIdentifier().getColumnId());
-    // check if both dictionary and its metadata file exists for a given column
-    return CarbonUtil.isFileExists(dictionaryFilePath) && CarbonUtil
-        .isFileExists(dictionaryMetadataFilePath);
-  }
-
-  /**
-   * This method will read the dictionary metadata file and return its last meta chunk entry
-   *
-   * @param dictionaryColumnUniqueIdentifier
-   * @return the last entry of the dictionary metadata chunks
-   * @throws IOException read and close method throws IO exception
-   */
-  protected CarbonDictionaryColumnMetaChunk readLastChunkFromDictionaryMetadataFile(
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) throws IOException {
-    DictionaryService dictService = CarbonCommonFactory.getDictionaryService();
-    CarbonDictionaryMetadataReader columnMetadataReaderImpl = dictService
-        .getDictionaryMetadataReader(dictionaryColumnUniqueIdentifier.getCarbonTableIdentifier(),
-            dictionaryColumnUniqueIdentifier.getColumnIdentifier(), carbonStorePath);
-
-    CarbonDictionaryColumnMetaChunk carbonDictionaryColumnMetaChunk = null;
-    // read metadata file
-    try {
-      carbonDictionaryColumnMetaChunk =
-          columnMetadataReaderImpl.readLastEntryOfDictionaryMetaChunk();
-    } finally {
-      // close the metadata reader
-      columnMetadataReaderImpl.close();
-    }
-    return carbonDictionaryColumnMetaChunk;
-  }
-
-  /**
-   * This method will validate dictionary metadata file for any modification
-   *
-   * @param carbonFile
-   * @param fileTimeStamp
-   * @param endOffset
-   * @return
-   */
-  private boolean isDictionaryMetaFileModified(CarbonFile carbonFile, long fileTimeStamp,
-      long endOffset) {
-    return carbonFile.isFileModified(fileTimeStamp, endOffset);
-  }
-
-  /**
-   * This method will return the carbon file object based on its type (local, HDFS)
-   *
-   * @param dictionaryColumnUniqueIdentifier
-   * @return
-   */
-  private CarbonFile getDictionaryMetaCarbonFile(
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
-    PathService pathService = CarbonCommonFactory.getPathService();
-    CarbonTablePath carbonTablePath = pathService
-        .getCarbonTablePath(dictionaryColumnUniqueIdentifier.getColumnIdentifier(), carbonStorePath,
-            dictionaryColumnUniqueIdentifier.getCarbonTableIdentifier());
-    String dictionaryFilePath =
-        carbonTablePath.getDictionaryMetaFilePath(dictionaryColumnUniqueIdentifier
-            .getColumnIdentifier().getColumnId());
-    FileFactory.FileType fileType = FileFactory.getFileType(dictionaryFilePath);
-    CarbonFile carbonFile = FileFactory.getCarbonFile(dictionaryFilePath, fileType);
-    return carbonFile;
-  }
-
-  /**
-   * This method will get the value for the given key. If value does not exist
-   * for the given key, it will check and load the value.
-   *
-   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
-   *                                         tableName and columnIdentifier
-   * @param dictionaryInfo
-   * @param lruCacheKey
-   * @param loadSortIndex                    read and load sort index file in memory
-   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
-   */
-  protected void checkAndLoadDictionaryData(
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier,
-      DictionaryInfo dictionaryInfo, String lruCacheKey, boolean loadSortIndex)
-      throws CarbonUtilException {
-    try {
-      // read last segment dictionary meta chunk entry to get the end offset of file
-      CarbonFile carbonFile = getDictionaryMetaCarbonFile(dictionaryColumnUniqueIdentifier);
-      boolean dictionaryMetaFileModified =
-          isDictionaryMetaFileModified(carbonFile, dictionaryInfo.getFileTimeStamp(),
-              dictionaryInfo.getDictionaryMetaFileLength());
-      // if dictionary metadata file is modified then only read the last entry from dictionary
-      // meta file
-      if (dictionaryMetaFileModified) {
-        synchronized (dictionaryInfo) {
-          carbonFile = getDictionaryMetaCarbonFile(dictionaryColumnUniqueIdentifier);
-          dictionaryMetaFileModified =
-              isDictionaryMetaFileModified(carbonFile, dictionaryInfo.getFileTimeStamp(),
-                  dictionaryInfo.getDictionaryMetaFileLength());
-          // Double Check :
-          // if dictionary metadata file is modified then only read the last entry from dictionary
-          // meta file
-          if (dictionaryMetaFileModified) {
-            CarbonDictionaryColumnMetaChunk carbonDictionaryColumnMetaChunk =
-                readLastChunkFromDictionaryMetadataFile(dictionaryColumnUniqueIdentifier);
-            // required size will be the total size of the file minus the offset
-            // till which the file is already read
-            long requiredSize =
-                carbonDictionaryColumnMetaChunk.getEnd_offset() - dictionaryInfo.getMemorySize();
-            if (requiredSize > 0) {
-              boolean columnAddedToLRUCache =
-                  carbonLRUCache.put(lruCacheKey, dictionaryInfo, requiredSize);
-              // if column is successfully added to lru cache then only load the
-              // dictionary data
-              if (columnAddedToLRUCache) {
-                // load dictionary data
-                loadDictionaryData(dictionaryInfo, dictionaryColumnUniqueIdentifier,
-                    dictionaryInfo.getMemorySize(), carbonDictionaryColumnMetaChunk.getEnd_offset(),
-                    loadSortIndex);
-                // set the end offset till where file is read
-                dictionaryInfo
-                    .setOffsetTillFileIsRead(carbonDictionaryColumnMetaChunk.getEnd_offset());
-                dictionaryInfo.setFileTimeStamp(carbonFile.getLastModifiedTime());
-                dictionaryInfo.setDictionaryMetaFileLength(carbonFile.getSize());
-              } else {
-                throw new CarbonUtilException(
-                    "Cannot load dictionary into memory. Not enough memory available");
-              }
-            }
-          }
-        }
-      }
-      // increment the column access count
-      incrementDictionaryAccessCount(dictionaryInfo);
-    } catch (IOException e) {
-      throw new CarbonUtilException(e.getMessage());
-    }
-  }
-
-  /**
-   * This method will prepare the lru cache key and return the same
-   *
-   * @param columnIdentifier
-   * @return
-   */
-  protected String getLruCacheKey(String columnIdentifier, CacheType cacheType) {
-    String lruCacheKey =
-        columnIdentifier + CarbonCommonConstants.UNDERSCORE + cacheType.getCacheName();
-    return lruCacheKey;
-  }
-
-  /**
-   * This method will check and load the dictionary file in memory for a given column
-   *
-   * @param dictionaryInfo                   holds dictionary information and data
-   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
-   *                                         tableName and columnIdentifier
-   * @param dictionaryChunkStartOffset       start offset from where dictionary file has to
-   *                                         be read
-   * @param dictionaryChunkEndOffset         end offset till where dictionary file has to
-   *                                         be read
-   * @param loadSortIndex
-   * @throws IOException
-   */
-  private void loadDictionaryData(DictionaryInfo dictionaryInfo,
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier,
-      long dictionaryChunkStartOffset, long dictionaryChunkEndOffset, boolean loadSortIndex)
-      throws IOException {
-    DictionaryCacheLoader dictionaryCacheLoader =
-        new DictionaryCacheLoaderImpl(dictionaryColumnUniqueIdentifier.getCarbonTableIdentifier(),
-            carbonStorePath);
-    dictionaryCacheLoader
-        .load(dictionaryInfo, dictionaryColumnUniqueIdentifier.getColumnIdentifier(),
-            dictionaryChunkStartOffset, dictionaryChunkEndOffset, loadSortIndex);
-  }
-
-  /**
-   * This method will increment the access count for a given dictionary column
-   *
-   * @param dictionaryInfo
-   */
-  protected void incrementDictionaryAccessCount(DictionaryInfo dictionaryInfo) {
-    dictionaryInfo.incrementAccessCount();
-  }
-
-  /**
-   * This method will update the dictionary access count which is required for its removal
-   * from column LRU cache
-   *
-   * @param dictionaryList
-   */
-  protected void clearDictionary(List<Dictionary> dictionaryList) {
-    for (Dictionary dictionary : dictionaryList) {
-      dictionary.clear();
-    }
-  }
-}
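
checkAndLoadDictionaryData above is a textbook double-checked refresh: the modification test runs once without a lock and is repeated inside synchronized (dictionaryInfo), so that when several queries race on the same column only the first performs the expensive reload. A minimal standalone sketch of the pattern follows; the class and member names are hypothetical, and a single long version number stands in for the real file timestamp and length checks.

import java.util.concurrent.atomic.AtomicLong;

final class RefreshOnModifySketch {

  private final AtomicLong loadedVersion = new AtomicLong(-1);
  private final Object refreshLock = new Object();

  void ensureFresh(long currentVersion) {
    if (currentVersion != loadedVersion.get()) {      // cheap unlocked check
      synchronized (refreshLock) {
        if (currentVersion != loadedVersion.get()) {  // re-check under the lock
          reload(currentVersion);                     // expensive load runs only once
          loadedVersion.set(currentVersion);
        }
      }
    }
  }

  private void reload(long version) {
    System.out.println("reloading dictionary data for version " + version);
  }

  public static void main(String[] args) {
    RefreshOnModifySketch cache = new RefreshOnModifySketch();
    cache.ensureFresh(1); // reloads
    cache.ensureFresh(1); // no-op: nothing changed
    cache.ensureFresh(2); // reloads again
  }
}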

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/ColumnDictionaryInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/ColumnDictionaryInfo.java b/core/src/main/java/org/carbondata/core/cache/dictionary/ColumnDictionaryInfo.java
deleted file mode 100644
index 59bafb0..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/ColumnDictionaryInfo.java
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-import java.nio.charset.Charset;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.util.ByteUtil;
-import org.carbondata.core.util.CarbonProperties;
-
-/**
- * class that implements methods specific for dictionary data look up
- */
-public class ColumnDictionaryInfo extends AbstractColumnDictionaryInfo {
-
-  /**
-   * index after members are sorted
-   */
-  private AtomicReference<List<Integer>> sortOrderReference =
-      new AtomicReference<List<Integer>>(new ArrayList<Integer>());
-
-  /**
-   * inverted index to retrieve the member
-   */
-  private AtomicReference<List<Integer>> sortReverseOrderReference =
-      new AtomicReference<List<Integer>>(new ArrayList<Integer>());
-
-  private DataType dataType;
-
-  public ColumnDictionaryInfo(DataType dataType) {
-    this.dataType = dataType;
-  }
-
-  /**
-   * This method will find and return the surrogate key for a given dictionary value
-   * Applicable scenario:
-   * 1. Incremental data load : Dictionary will not be generated for existing values;
-   * those values have to be looked up in the existing dictionary cache.
-   * 2. Filter scenarios where from value surrogate key has to be found.
-   *
-   * @param value dictionary value as byte array
-   * @return if found returns key else 0
-   */
-  @Override public int getSurrogateKey(byte[] value) {
-    return getSurrogateKeyFromDictionaryValue(value);
-  }
-
-  /**
-   * This method will find and return the sort index for a given dictionary id.
-   * Applicable scenarios:
-   * 1. Used in case of order by queries when data sorting is required
-   *
-   * @param surrogateKey a unique ID for a dictionary value
-   * @return if found returns the sort index else -1
-   */
-  @Override public int getSortedIndex(int surrogateKey) {
-    if (surrogateKey > sortReverseOrderReference.get().size()
-        || surrogateKey < MINIMUM_SURROGATE_KEY) {
-      return -1;
-    }
-    // decrement the surrogate key because it is effectively an index into the array
-    // list: surrogate keys start from 1 while list indexes start from 0
-    return sortReverseOrderReference.get().get(surrogateKey - 1);
-  }
-
-  /**
-   * This method will find and return the dictionary value from sorted index.
-   * Applicable scenarios:
-   * 1. Query final result preparation in case of order by queries:
-   * While converting the final result, which will be surrogate keys,
-   * back to the original dictionary values, this method will be used
-   *
-   * @param sortedIndex sort index of dictionary value
-   * @return value if found else null
-   */
-  @Override public String getDictionaryValueFromSortedIndex(int sortedIndex) {
-    if (sortedIndex > sortReverseOrderReference.get().size()
-        || sortedIndex < MINIMUM_SURROGATE_KEY) {
-      return null;
-    }
-    // decrement the sort index because it is effectively an index into the array
-    // list: surrogate keys and sort indexes start from 1 while list indexes start
-    // from 0
-    int surrogateKey = sortOrderReference.get().get(sortedIndex - 1);
-    return getDictionaryValueForKey(surrogateKey);
-  }
-
-  /**
-   * This method will add a new dictionary chunk to existing list of dictionary chunks
-   *
-   * @param dictionaryChunk
-   */
-  @Override public void addDictionaryChunk(List<byte[]> dictionaryChunk) {
-    dictionaryChunks.add(dictionaryChunk);
-  }
-
-  /**
-   * This method will set the sort order index of a dictionary column.
-   * Sort order index is the index of dictionary values after they are sorted.
-   *
-   * @param sortOrderIndex
-   */
-  @Override public void setSortOrderIndex(List<Integer> sortOrderIndex) {
-    sortOrderReference.set(sortOrderIndex);
-  }
-
-  /**
-   * This method will set the sort reverse index of a dictionary column.
-   * Sort reverse index is the index of dictionary values before they are sorted.
-   *
-   * @param sortReverseOrderIndex
-   */
-  @Override public void setSortReverseOrderIndex(List<Integer> sortReverseOrderIndex) {
-    sortReverseOrderReference.set(sortReverseOrderIndex);
-  }
-
-  /**
-   * This method will apply binary search logic to find the surrogate key for the
-   * given value
-   *
-   * @param key dictionary value to be searched, as a byte array
-   * @return the surrogate key if found, else 0
-   */
-  private int getSurrogateKeyFromDictionaryValue(byte[] key) {
-    String filterKey = new String(key, Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
-    int low = 0;
-    List<Integer> sortedSurrogates = sortOrderReference.get();
-    int high = sortedSurrogates.size() - 1;
-    while (low <= high) {
-      int mid = (low + high) >>> 1;
-      int surrogateKey = sortedSurrogates.get(mid);
-      byte[] dictionaryValue = getDictionaryBytesFromSurrogate(surrogateKey);
-      int cmp = -1;
-      if (this.getDataType() != DataType.STRING) {
-        cmp = compareFilterKeyWithDictionaryKey(new String(dictionaryValue), filterKey,
-            this.getDataType());
-
-      } else {
-        cmp = ByteUtil.UnsafeComparer.INSTANCE.compareTo(dictionaryValue, key);
-      }
-      if (cmp < 0) {
-        low = mid + 1;
-      } else if (cmp > 0) {
-        high = mid - 1;
-      } else {
-        return surrogateKey; // key found
-      }
-    }
-    return 0;
-  }
-
-  /**
-   * This method will apply binary search logic to find the surrogate keys for the
-   * given list of filter member values and collect them into the surrogates list
-   *
-   * @param byteValuesOfFilterMembers filter member values to be searched
-   * @param surrogates                list to which the resolved surrogate keys are added
-   */
-  public void getIncrementalSurrogateKeyFromDictionary(List<byte[]> byteValuesOfFilterMembers,
-      List<Integer> surrogates) {
-    List<Integer> sortedSurrogates = sortOrderReference.get();
-    int low = 0;
-    for (byte[] byteValueOfFilterMember : byteValuesOfFilterMembers) {
-      String filterKey = new String(byteValueOfFilterMember,
-          Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
-      if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(filterKey)) {
-        surrogates.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL_SURROGATE_KEY);
-        continue;
-      }
-      int high = sortedSurrogates.size() - 1;
-      while (low <= high) {
-        int mid = (low + high) >>> 1;
-        int surrogateKey = sortedSurrogates.get(mid);
-        byte[] dictionaryValue = getDictionaryBytesFromSurrogate(surrogateKey);
-        int cmp = -1;
-        //fortify fix
-        if (null == dictionaryValue) {
-          cmp = -1;
-        } else if (this.getDataType() != DataType.STRING) {
-          cmp = compareFilterKeyWithDictionaryKey(new String(dictionaryValue), filterKey,
-              this.getDataType());
-
-        } else {
-          cmp =
-              ByteUtil.UnsafeComparer.INSTANCE.compareTo(dictionaryValue, byteValueOfFilterMember);
-        }
-        if (cmp < 0) {
-          low = mid + 1;
-        } else if (cmp > 0) {
-          high = mid - 1;
-        } else {
-
-          surrogates.add(surrogateKey);
-          low = mid;
-          break;
-        }
-      }
-    }
-    //Default value has to be added
-    if (surrogates.isEmpty()) {
-      surrogates.add(0);
-    }
-  }
-
-  private int compareFilterKeyWithDictionaryKey(String dictionaryVal, String memberVal,
-      DataType dataType) {
-    try {
-      switch (dataType) {
-        case SHORT:
-          return Short.compare((Short.parseShort(dictionaryVal)), (Short.parseShort(memberVal)));
-        case INT:
-          return Integer.compare((Integer.parseInt(dictionaryVal)), (Integer.parseInt(memberVal)));
-        case DOUBLE:
-          return Double
-              .compare((Double.parseDouble(dictionaryVal)), (Double.parseDouble(memberVal)));
-        case LONG:
-          return Long.compare((Long.parseLong(dictionaryVal)), (Long.parseLong(memberVal)));
-        case BOOLEAN:
-          return Boolean
-              .compare((Boolean.parseBoolean(dictionaryVal)), (Boolean.parseBoolean(memberVal)));
-        case TIMESTAMP:
-          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
-              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
-          Date dateToStr;
-          Date dictionaryDate;
-          dateToStr = parser.parse(memberVal);
-          dictionaryDate = parser.parse(dictionaryVal);
-          return dictionaryDate.compareTo(dateToStr);
-        case DECIMAL:
-          java.math.BigDecimal javaDecValForDictVal = new java.math.BigDecimal(dictionaryVal);
-          java.math.BigDecimal javaDecValForMemberVal = new java.math.BigDecimal(memberVal);
-          return javaDecValForDictVal.compareTo(javaDecValForMemberVal);
-        default:
-          return -1;
-      }
-    } catch (Exception e) {
-      // For all data types other than String, the null member is treated as the highest
-      // value while searching the dictionary. Since parsing fails for every data type
-      // except String, an explicit comparison is required when the filter member is also
-      // a null member; if both are null members, the method has to return 0.
-      if (memberVal.equals(dictionaryVal)) {
-        return 0;
-      }
-      return 1;
-    }
-  }
-
-  /**
-   * getDataType().
-   *
-   * @return
-   */
-  public DataType getDataType() {
-    return dataType;
-  }
-
-}
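
The lookup above is an indirect binary search: dictionary values stay in surrogate-key (arrival) order, and the separate sort-order list holds surrogate keys arranged by value, so the search probes sortOrder[mid] and compares the value that surrogate points at. A minimal standalone sketch with hypothetical names and inline sample data (the real code additionally special-cases non-string types and null members):

import java.nio.charset.StandardCharsets;
import java.util.List;

public final class SortedSurrogateLookupSketch {

  // Returns the surrogate key (1-based) if found, else 0, like the class above.
  static int findSurrogate(List<byte[]> valuesBySurrogate, List<Integer> sortOrder, byte[] key) {
    int low = 0;
    int high = sortOrder.size() - 1;
    while (low <= high) {
      int mid = (low + high) >>> 1;
      int surrogate = sortOrder.get(mid);
      byte[] candidate = valuesBySurrogate.get(surrogate - 1); // surrogates start at 1
      int cmp = compareUnsigned(candidate, key);
      if (cmp < 0) {
        low = mid + 1;
      } else if (cmp > 0) {
        high = mid - 1;
      } else {
        return surrogate;
      }
    }
    return 0;
  }

  // Lexicographic unsigned byte comparison, as the real ByteUtil comparer performs.
  static int compareUnsigned(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int cmp = (a[i] & 0xFF) - (b[i] & 0xFF);
      if (cmp != 0) {
        return cmp;
      }
    }
    return a.length - b.length;
  }

  public static void main(String[] args) {
    // values in arrival order: surrogate 1 -> "banana", 2 -> "apple", 3 -> "cherry"
    List<byte[]> values = List.of(
        "banana".getBytes(StandardCharsets.UTF_8),
        "apple".getBytes(StandardCharsets.UTF_8),
        "cherry".getBytes(StandardCharsets.UTF_8));
    List<Integer> sortOrder = List.of(2, 1, 3); // apple, banana, cherry
    System.out.println(findSurrogate(values, sortOrder,
        "cherry".getBytes(StandardCharsets.UTF_8))); // prints 3
  }
}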

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/ColumnReverseDictionaryInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/ColumnReverseDictionaryInfo.java b/core/src/main/java/org/carbondata/core/cache/dictionary/ColumnReverseDictionaryInfo.java
deleted file mode 100644
index 86ff971..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/ColumnReverseDictionaryInfo.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.util.CarbonProperties;
-
-import net.jpountz.xxhash.XXHash32;
-import net.jpountz.xxhash.XXHashFactory;
-
-/**
- * class that implements methods specific for dictionary data look up
- */
-public class ColumnReverseDictionaryInfo extends AbstractColumnDictionaryInfo {
-
-  /**
-   * Map which will maintain mapping of byte array to surrogate key
-   */
-  private Map<DictionaryByteArrayWrapper, Integer> dictionaryByteArrayToSurrogateKeyMap;
-
-  /**
-   * hashing algorithm to calculate hash code
-   */
-  private XXHash32 xxHash32;
-
-  /**
-   * check and initialize xxHash32 if enabled
-   */
-  public ColumnReverseDictionaryInfo() {
-    boolean useXXHash = Boolean.valueOf(CarbonProperties.getInstance()
-        .getProperty(CarbonCommonConstants.ENABLE_XXHASH,
-            CarbonCommonConstants.ENABLE_XXHASH_DEFAULT));
-    if (useXXHash) {
-      xxHash32 = XXHashFactory.fastestInstance().hash32();
-    }
-  }
-
-  /**
-   * This method will find and return the surrogate key for a given dictionary value
-   * Applicable scenario:
-   * 1. Incremental data load : Dictionary will not be generated for existing values;
-   * those values have to be looked up in the existing dictionary cache.
-   * 2. Filter scenarios where from value surrogate key has to be found.
-   *
-   * @param value dictionary value as byte array. It will be treated as key here
-   * @return if found returns key else 0
-   */
-  @Override public int getSurrogateKey(byte[] value) {
-    DictionaryByteArrayWrapper dictionaryByteArrayWrapper =
-        new DictionaryByteArrayWrapper(value, xxHash32);
-    Integer surrogateKeyInMap =
-        dictionaryByteArrayToSurrogateKeyMap.get(dictionaryByteArrayWrapper);
-    if (null == surrogateKeyInMap) {
-      return CarbonCommonConstants.INVALID_SURROGATE_KEY;
-    }
-    return surrogateKeyInMap;
-  }
-
-  /**
-   * This method will add a new dictionary chunk to existing list of dictionary chunks
-   *
-   * @param dictionaryChunk
-   */
-  @Override public void addDictionaryChunk(List<byte[]> dictionaryChunk) {
-    dictionaryChunks.add(dictionaryChunk);
-    if (null == dictionaryByteArrayToSurrogateKeyMap) {
-      createDictionaryByteArrayToSurrogateKeyMap(dictionaryChunk.size());
-    }
-    addDataToDictionaryMap();
-  }
-
-  /**
-   * This method will add the new dictionary data to the map
-   */
-  private void addDataToDictionaryMap() {
-    int surrogateKey = dictionaryByteArrayToSurrogateKeyMap.size();
-    List<byte[]> oneDictionaryChunk = dictionaryChunks.get(dictionaryChunks.size() - 1);
-    for (int i = 0; i < oneDictionaryChunk.size(); i++) {
-      // create a wrapper class that will calculate hash code for byte array
-      DictionaryByteArrayWrapper dictionaryByteArrayWrapper =
-          new DictionaryByteArrayWrapper(oneDictionaryChunk.get(i), xxHash32);
-      dictionaryByteArrayToSurrogateKeyMap.put(dictionaryByteArrayWrapper, ++surrogateKey);
-    }
-  }
-
-  /**
-   * This method will create the dictionary map. The first time it is called it will
-   * create the dictionary map with capacity equal to the size of the list of byte arrays
-   *
-   * @param initialMapSize capacity to which map is to be instantiated
-   */
-  private void createDictionaryByteArrayToSurrogateKeyMap(int initialMapSize) {
-    dictionaryByteArrayToSurrogateKeyMap = new ConcurrentHashMap<>(initialMapSize);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/Dictionary.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/Dictionary.java b/core/src/main/java/org/carbondata/core/cache/dictionary/Dictionary.java
deleted file mode 100644
index 3f395c1..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/Dictionary.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-/**
- * dictionary interface which declares methods for finding surrogate key for a
- * given dictionary value and finding dictionary value from a given surrogate key
- */
-public interface Dictionary {
-
-  /**
-   * This method will find and return the surrogate key for a given dictionary value
-   * Applicable scenario:
-   * 1. Incremental data load : Dictionary will not be generated for existing values;
-   * those values have to be looked up in the existing dictionary cache.
-   * 2. Filter scenarios where from value surrogate key has to be found.
-   *
-   * @param value dictionary value
-   * @return if found returns key else 0
-   */
-  int getSurrogateKey(String value);
-
-  /**
-   * This method will find and return the surrogate key for a given dictionary value
-   * Applicable scenario:
-   * 1. Incremental data load : Dictionary will not be generated for existing values;
-   * those values have to be looked up in the existing dictionary cache.
-   * 2. Filter scenarios where from value surrogate key has to be found.
-   *
-   * @param value dictionary value as byte array
-   * @return if found returns key else -1
-   */
-  int getSurrogateKey(byte[] value);
-
-  /**
-   * This method will find and return the dictionary value for a given surrogate key.
-   * Applicable scenarios:
-   * 1. Query final result preparation : While converting the final result, which will
-   * be surrogate keys, back to the original dictionary values, this method will be used
-   *
-   * @param surrogateKey a unique ID for a dictionary value
-   * @return value if found else null
-   */
-  String getDictionaryValueForKey(int surrogateKey);
-
-  /**
-   * This method will find and return the sort index for a given dictionary id.
-   * Applicable scenarios:
-   * 1. Used in case of order by queries when data sorting is required
-   *
-   * @param surrogateKey a unique ID for a dictionary value
-   * @return if found returns the sort index else -1
-   */
-  int getSortedIndex(int surrogateKey);
-
-  /**
-   * This method will find and return the dictionary value from sorted index.
-   * Applicable scenarios:
-   * 1. Query final result preparation in case of order by queries:
-   * While converting the final result, which will be surrogate keys,
-   * back to the original dictionary values, this method will be used
-   *
-   * @param sortedIndex sort index of dictionary value
-   * @return value if found else null
-   */
-  String getDictionaryValueFromSortedIndex(int sortedIndex);
-
-  /**
-   * This method returns the dictionary chunks wrapper of a column.
-   * The wrapper wraps the List<List<byte[]>> and provides an iterator to retrieve
-   * the chunk members.
-   * Application scenario:
-   * For preparing the column Sort info while writing the sort index file.
-   *
-   * @return
-   */
-  DictionaryChunksWrapper getDictionaryChunks();
-
-  /**
-   * This method will release the objects and set default value for primitive types
-   */
-  void clear();
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryByteArrayWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryByteArrayWrapper.java b/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryByteArrayWrapper.java
deleted file mode 100644
index ee8f991..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryByteArrayWrapper.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-import java.util.Arrays;
-
-import org.carbondata.core.util.ByteUtil;
-
-import net.jpountz.xxhash.XXHash32;
-
-/**
- * class that holds the byte array and overrides the equals and hashCode methods,
- * which is useful for object comparison
- */
-public class DictionaryByteArrayWrapper {
-
-  /**
-   * dictionary value as byte array
-   */
-  private byte[] data;
-
-  /**
-   * hashing algorithm to calculate hash code
-   */
-  private XXHash32 xxHash32;
-
-  /**
-   * @param data
-   */
-  public DictionaryByteArrayWrapper(byte[] data) {
-    this.data = data;
-  }
-
-  /**
-   * @param data
-   * @param xxHash32
-   */
-  public DictionaryByteArrayWrapper(byte[] data, XXHash32 xxHash32) {
-    this(data);
-    this.xxHash32 = xxHash32;
-  }
-
-  /**
-   * This method will compare 2 DictionaryByteArrayWrapper objects
-   *
-   * @param other
-   * @return
-   */
-  @Override public boolean equals(Object other) {
-    if (this == other) {
-      return true;
-    }
-    if (other == null || getClass() != other.getClass()) {
-      return false;
-    }
-    DictionaryByteArrayWrapper otherObjectToCompare = (DictionaryByteArrayWrapper) other;
-    if (data.length != otherObjectToCompare.data.length) {
-      return false;
-    }
-    return ByteUtil.UnsafeComparer.INSTANCE.equals(data, otherObjectToCompare.data);
-
-  }
-
-  /**
-   * This method will calculate the hash code for given data
-   *
-   * @return
-   */
-  @Override public int hashCode() {
-    if (null != xxHash32) {
-      return xxHash32.hash(data, 0, data.length, 0);
-    }
-    int result = Arrays.hashCode(data);
-    result = 31 * result;
-    return result;
-  }
-}
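
The wrapper exists because a raw byte[] uses identity-based equals and hashCode and therefore cannot serve as a map key, which is exactly what the reverse dictionary's ConcurrentHashMap needs. A minimal standalone sketch of the idea with hypothetical names; it hashes with Arrays.hashCode, where the real class can instead delegate to net.jpountz.xxhash.XXHash32 for speed.

import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class ByteKeySketch {

  static final class ByteKey {
    private final byte[] data;

    ByteKey(byte[] data) {
      this.data = data;
    }

    @Override public boolean equals(Object other) {
      return other instanceof ByteKey && Arrays.equals(data, ((ByteKey) other).data);
    }

    @Override public int hashCode() {
      return Arrays.hashCode(data); // content-based, so equal arrays collide as required
    }
  }

  public static void main(String[] args) {
    Map<ByteKey, Integer> reverseDictionary = new ConcurrentHashMap<>();
    reverseDictionary.put(new ByteKey("apple".getBytes()), 1);
    // a different byte[] instance with the same contents still finds the entry
    System.out.println(reverseDictionary.get(new ByteKey("apple".getBytes()))); // prints 1
  }
}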

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryCacheLoader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryCacheLoader.java b/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryCacheLoader.java
deleted file mode 100644
index a28d58d..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryCacheLoader.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-import java.io.IOException;
-
-import org.carbondata.core.carbon.ColumnIdentifier;
-
-public interface DictionaryCacheLoader {
-
-  /**
-   * This method will load the dictionary data for a given columnIdentifier
-   *
-   * @param dictionaryInfo             dictionary info object which will hold the required data
-   *                                   for a given column
-   * @param columnIdentifier           column unique identifier
-   * @param dictionaryChunkStartOffset start offset from where dictionary file has to
-   *                                   be read
-   * @param dictionaryChunkEndOffset   end offset till where dictionary file has to
-   *                                   be read
-   * @param loadSortIndex              flag to indicate whether the sort index file has to be
-   *                                   read in memory after dictionary loading
-   * @throws IOException
-   */
-  void load(DictionaryInfo dictionaryInfo, ColumnIdentifier columnIdentifier,
-      long dictionaryChunkStartOffset, long dictionaryChunkEndOffset, boolean loadSortIndex)
-      throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryCacheLoaderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryCacheLoaderImpl.java b/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryCacheLoaderImpl.java
deleted file mode 100644
index 770cb07..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryCacheLoaderImpl.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.carbondata.common.factory.CarbonCommonFactory;
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.reader.CarbonDictionaryReader;
-import org.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReader;
-import org.carbondata.core.service.DictionaryService;
-
-/**
- * This class is responsible for loading the dictionary data for given columns
- */
-public class DictionaryCacheLoaderImpl implements DictionaryCacheLoader {
-
-  /**
-   * carbon table identifier
-   */
-  private CarbonTableIdentifier carbonTableIdentifier;
-
-  /**
-   * carbon store path
-   */
-  private String carbonStorePath;
-
-  /**
-   * @param carbonTableIdentifier fully qualified table name
-   * @param carbonStorePath       hdfs store path
-   */
-  public DictionaryCacheLoaderImpl(CarbonTableIdentifier carbonTableIdentifier,
-      String carbonStorePath) {
-    this.carbonTableIdentifier = carbonTableIdentifier;
-    this.carbonStorePath = carbonStorePath;
-  }
-
-  /**
-   * This method will load the dictionary data for a given columnIdentifier
-   *
-   * @param dictionaryInfo             dictionary info object which will hold the required data
-   *                                   for a given column
-   * @param columnIdentifier           column unique identifier
-   * @param dictionaryChunkStartOffset start offset from where dictionary file has to
-   *                                   be read
-   * @param dictionaryChunkEndOffset   end offset till where dictionary file has to
-   *                                   be read
-   * @param loadSortIndex              flag to indicate whether the sort index file has to be
-   *                                   read in memory after dictionary loading
-   * @throws IOException
-   */
-  @Override public void load(DictionaryInfo dictionaryInfo, ColumnIdentifier columnIdentifier,
-      long dictionaryChunkStartOffset, long dictionaryChunkEndOffset, boolean loadSortIndex)
-      throws IOException {
-    List<byte[]> dictionaryChunk =
-        load(columnIdentifier, dictionaryChunkStartOffset, dictionaryChunkEndOffset);
-    if (loadSortIndex) {
-      readSortIndexFile(dictionaryInfo, columnIdentifier);
-    }
-    dictionaryInfo.addDictionaryChunk(dictionaryChunk);
-  }
-
-  /**
-   * This method will load the dictionary data between a given start and end offset
-   *
-   * @param columnIdentifier column unique identifier
-   * @param startOffset      start offset of dictionary file
-   * @param endOffset        end offset of dictionary file
-   * @return list of dictionary value
-   * @throws IOException
-   */
-  private List<byte[]> load(ColumnIdentifier columnIdentifier, long startOffset, long endOffset)
-      throws IOException {
-    CarbonDictionaryReader dictionaryReader = getDictionaryReader(columnIdentifier);
-    List<byte[]> dictionaryValue = null;
-    try {
-      dictionaryValue = dictionaryReader.read(startOffset, endOffset);
-    } finally {
-      dictionaryReader.close();
-    }
-    return dictionaryValue;
-  }
-
-  /**
-   * This method will read the sort index file and load into memory
-   *
-   * @param dictionaryInfo
-   * @param columnIdentifier
-   * @throws IOException
-   */
-  private void readSortIndexFile(DictionaryInfo dictionaryInfo, ColumnIdentifier columnIdentifier)
-      throws IOException {
-    CarbonDictionarySortIndexReader sortIndexReader = getSortIndexReader(columnIdentifier);
-    try {
-      dictionaryInfo.setSortOrderIndex(sortIndexReader.readSortIndex());
-      dictionaryInfo.setSortReverseOrderIndex(sortIndexReader.readInvertedSortIndex());
-    } finally {
-      sortIndexReader.close();
-    }
-  }
-
-  /**
-   * This method will create a dictionary reader instance to read the dictionary file
-   *
-   * @param columnIdentifier unique column identifier
-   * @return carbon dictionary reader instance
-   */
-  private CarbonDictionaryReader getDictionaryReader(ColumnIdentifier columnIdentifier) {
-    DictionaryService dictService = CarbonCommonFactory.getDictionaryService();
-    return dictService
-        .getDictionaryReader(carbonTableIdentifier, columnIdentifier, carbonStorePath);
-  }
-
-  /**
-   * @param columnIdentifier unique column identifier
-   * @return sort index reader instance
-   */
-  private CarbonDictionarySortIndexReader getSortIndexReader(ColumnIdentifier columnIdentifier) {
-    DictionaryService dictService = CarbonCommonFactory.getDictionaryService();
-    return dictService
-        .getDictionarySortIndexReader(carbonTableIdentifier, columnIdentifier, carbonStorePath);
-  }
-}
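
The point of the start/end offsets in the loader above is incremental loading: only the byte range appended since the last load is read, so a cached dictionary is extended rather than re-read from scratch. A minimal sketch of such a windowed read, with a hypothetical file layout and names (the real reader additionally decodes the chunk format, which is omitted here):

import java.io.IOException;
import java.io.RandomAccessFile;

final class RangeReadSketch {

  static byte[] readRange(String path, long startOffset, long endOffset) throws IOException {
    byte[] window = new byte[(int) (endOffset - startOffset)];
    try (RandomAccessFile file = new RandomAccessFile(path, "r")) {
      file.seek(startOffset);  // skip the portion already held in the cache
      file.readFully(window);  // read only the newly appended portion
    }
    return window;
  }
}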

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryChunksWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryChunksWrapper.java b/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryChunksWrapper.java
deleted file mode 100644
index e88d722..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryChunksWrapper.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.cache.dictionary;
-
-import java.util.Iterator;
-import java.util.List;
-
-/**
- * The wrapper class wraps the List<List<byte[]>> and provides an iterator to retrieve the
- * chunk members, and exposes the getSize API to get the number of members across the
- * List<List<byte[]>> chunks.
- * Applications Scenario:
- * For preparing the column Sort info while writing the sort index file.
- */
-public class DictionaryChunksWrapper implements Iterator<byte[]> {
-
-  /**
-   * list of dictionaryChunks
-   */
-  private List<List<byte[]>> dictionaryChunks;
-
-  /**
-   * size of the list
-   */
-  private int size;
-
-  /**
-   * Current index of the list
-   */
-  private int currentIndex;
-
-  /**
-   * variable holds the count of elements already iterated
-   */
-  private int iteratorIndex;
-
-  /**
-   * variable holds the current index of List<List<byte[]>> being traversed
-   */
-  private int outerIndex;
-
-  /**
-   * Constructor of DictionaryChunksWrapper
-   *
-   * @param dictionaryChunks
-   */
-  public DictionaryChunksWrapper(List<List<byte[]>> dictionaryChunks) {
-    this.dictionaryChunks = dictionaryChunks;
-    for (List<byte[]> chunk : dictionaryChunks) {
-      this.size += chunk.size();
-    }
-  }
-
-  /**
-   * Returns {@code true} if the iteration has more elements.
-   * (In other words, returns {@code true} if {@link #next} would
-   * return an element rather than throwing an exception.)
-   *
-   * @return {@code true} if the iteration has more elements
-   */
-  @Override public boolean hasNext() {
-    return (currentIndex < size);
-  }
-
-  /**
-   * Returns the next element in the iteration.
-   * The method picks the next element from the first inner list until it is exhausted,
-   * then moves on to the second inner list, and so on.
-   *
-   * @return the next element in the iteration
-   */
-  @Override public byte[] next() {
-    if (iteratorIndex >= dictionaryChunks.get(outerIndex).size()) {
-      iteratorIndex = 0;
-      outerIndex++;
-    }
-    byte[] value = dictionaryChunks.get(outerIndex).get(iteratorIndex);
-    currentIndex++;
-    iteratorIndex++;
-    return value;
-  }
-
-  /**
-   * Removes from the underlying collection the last element returned
-   * by this iterator (optional operation).  This method can be called
-   * only once per call to {@link #next}.  The behavior of an iterator
-   * is unspecified if the underlying collection is modified while the
-   * iteration is in progress in any way other than by calling this
-   * method.
-   *
-   * @throws UnsupportedOperationException if the {@code remove}
-   *                                       operation is not supported by this iterator
-   * @throws IllegalStateException         if the {@code next} method has not
-   *                                       yet been called, or the {@code remove} method has already
-   *                                       been called after the last call to the {@code next}
-   *                                       method
-   * @implSpec The default implementation throws an instance of
-   * {@link UnsupportedOperationException} and performs no other action.
-   */
-  @Override public void remove() {
-    throw new UnsupportedOperationException("Remove operation not supported");
-  }
-
-  /**
-   * Returns the total number of elements across all inner lists of List<List<byte[]>>
-   *
-   * @return total element count
-   */
-  public int getSize() {
-    return size;
-  }
-}
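
For reference, a minimal sketch of how the wrapper flattens the nested chunk lists. The sample
values are invented; the sketch assumes the DictionaryChunksWrapper class shown above:

    import java.util.Arrays;
    import java.util.List;

    public class DictionaryChunksWrapperDemo {
      public static void main(String[] args) {
        // two hypothetical dictionary chunks holding three members in total
        List<List<byte[]>> chunks = Arrays.asList(
            Arrays.asList("a".getBytes(), "b".getBytes()),
            Arrays.asList("c".getBytes()));
        DictionaryChunksWrapper wrapper = new DictionaryChunksWrapper(chunks);
        // getSize() reports the total member count across all inner lists
        System.out.println("size = " + wrapper.getSize()); // size = 3
        // the iterator exhausts the first inner list, then moves to the next
        while (wrapper.hasNext()) {
          System.out.println(new String(wrapper.next()));
        }
      }
    }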

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryColumnUniqueIdentifier.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryColumnUniqueIdentifier.java b/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryColumnUniqueIdentifier.java
deleted file mode 100644
index dea789f..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryColumnUniqueIdentifier.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-
-/**
- * dictionary column identifier which includes table identifier and column identifier
- */
-public class DictionaryColumnUniqueIdentifier {
-
-  /**
-   * table fully qualified name
-   */
-  private CarbonTableIdentifier carbonTableIdentifier;
-
-  /**
-   * unique column id
-   */
-  private ColumnIdentifier columnIdentifier;
-
-  private DataType dataType;
-
-  /**
-   * Will be used in case of the reverse dictionary cache, which is used
-   * during data loading.
-   *
-   * @param carbonTableIdentifier
-   * @param columnIdentifier
-   */
-  public DictionaryColumnUniqueIdentifier(CarbonTableIdentifier carbonTableIdentifier,
-      ColumnIdentifier columnIdentifier) {
-    this.carbonTableIdentifier = carbonTableIdentifier;
-    this.columnIdentifier = columnIdentifier;
-  }
-
-  /**
-   * Will be used in case of the forward dictionary cache, which is used
-   * during query execution.
-   *
-   * @param carbonTableIdentifier
-   * @param columnIdentifier
-   * @param dataType
-   */
-  public DictionaryColumnUniqueIdentifier(CarbonTableIdentifier carbonTableIdentifier,
-      ColumnIdentifier columnIdentifier, DataType dataType) {
-    this(carbonTableIdentifier, columnIdentifier);
-    this.dataType = dataType;
-  }
-
-  public DataType getDataType() {
-    return dataType;
-  }
-
-  /**
-   * @return table identifier
-   */
-  public CarbonTableIdentifier getCarbonTableIdentifier() {
-    return carbonTableIdentifier;
-  }
-
-  /**
-   * @return columnIdentifier
-   */
-  public ColumnIdentifier getColumnIdentifier() {
-    return columnIdentifier;
-  }
-
-  /**
-   * overridden equals method
-   *
-   * @param other
-   * @return
-   */
-  @Override public boolean equals(Object other) {
-    if (this == other) return true;
-    if (other == null || getClass() != other.getClass()) return false;
-    DictionaryColumnUniqueIdentifier that = (DictionaryColumnUniqueIdentifier) other;
-    if (!carbonTableIdentifier.equals(that.carbonTableIdentifier)) return false;
-    return columnIdentifier.equals(that.columnIdentifier);
-
-  }
-
-  /**
-   * overridden hashcode method
-   *
-   * @return
-   */
-  @Override public int hashCode() {
-    int result = carbonTableIdentifier.hashCode();
-    result = 31 * result + columnIdentifier.hashCode();
-    return result;
-  }
-}
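
The equals/hashCode pair above follows the standard 31-multiplier recipe over the two identifying
fields, which is what makes the identifier usable as an LRU cache key. The same pattern in a
generic, self-contained form (the field names here are illustrative only):

    // illustrative two-field key using the same 31-multiplier recipe
    final class TwoFieldKey {
      private final String tableName;
      private final String columnId;

      TwoFieldKey(String tableName, String columnId) {
        this.tableName = tableName;
        this.columnId = columnId;
      }

      @Override public boolean equals(Object other) {
        if (this == other) return true;
        if (other == null || getClass() != other.getClass()) return false;
        TwoFieldKey that = (TwoFieldKey) other;
        return tableName.equals(that.tableName) && columnId.equals(that.columnId);
      }

      @Override public int hashCode() {
        return 31 * tableName.hashCode() + columnId.hashCode();
      }
    }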

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryInfo.java b/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryInfo.java
deleted file mode 100644
index 6721b3b..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/DictionaryInfo.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-import java.util.List;
-
-import org.carbondata.core.cache.Cacheable;
-
-/**
- * An interface which holds dictionary information like end offset,
- * file timestamp for one column
- */
-public interface DictionaryInfo extends Cacheable, Dictionary {
-
-  /**
-   * This method will increment the access count for a column by 1
-   * whenever a column is getting used in query or incremental data load
-   */
-  void incrementAccessCount();
-
-  /**
-   * This method will update the end offset of the file every time a file is read
-   *
-   * @param offsetTillFileIsRead
-   */
-  void setOffsetTillFileIsRead(long offsetTillFileIsRead);
-
-  /**
-   * This method will update the timestamp of a file if a file is modified
-   * like in case of incremental load
-   *
-   * @param fileTimeStamp
-   */
-  void setFileTimeStamp(long fileTimeStamp);
-
-  /**
-   * This method will add a new dictionary chunk to existing list of dictionary chunks
-   *
-   * @param dictionaryChunk
-   */
-  void addDictionaryChunk(List<byte[]> dictionaryChunk);
-
-  /**
-   * This method will set the sort order index of a dictionary column.
-   * Sort order index is the index of dictionary values after they are sorted.
-   *
-   * @param sortOrderIndex
-   */
-  void setSortOrderIndex(List<Integer> sortOrderIndex);
-
-  /**
-   * This method will set the sort reverse index of a dictionary column.
-   * Sort reverse index is the index of dictionary values before they are sorted.
-   *
-   * @param sortReverseOrderIndex
-   */
-  void setSortReverseOrderIndex(List<Integer> sortReverseOrderIndex);
-
-  /**
-   * dictionary metadata file length which will be set whenever we reload dictionary
-   * data from disk
-   *
-   * @param dictionaryMetaFileLength length of dictionary metadata file
-   */
-  void setDictionaryMetaFileLength(long dictionaryMetaFileLength);
-
-  /**
-   * Dictionary meta file length which will be read to check whether the length of the
-   * dictionary meta file has been modified
-   *
-   * @return dictionary metadata file length
-   */
-  long getDictionaryMetaFileLength();
-}
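
One reading of the sort index javadoc above, as a runnable sketch with an invented three-value
dictionary (surrogate keys are 1-based, as elsewhere in this code):

    import java.util.Arrays;
    import java.util.List;

    public class SortIndexDemo {
      public static void main(String[] args) {
        // hypothetical dictionary in surrogate-key order: key 1 = "delta", 2 = "alpha", 3 = "charlie"
        List<String> values = Arrays.asList("delta", "alpha", "charlie");
        // sort order index: sorted position of each surrogate key
        // (sorted order is alpha, charlie, delta -> delta 3rd, alpha 1st, charlie 2nd)
        int[] sortOrderIndex = {3, 1, 2};
        // sort reverse order index: surrogate key sitting at each sorted position
        int[] sortReverseOrderIndex = {2, 3, 1};
        for (int sortedPos = 0; sortedPos < values.size(); sortedPos++) {
          int surrogate = sortReverseOrderIndex[sortedPos];
          System.out.println("sorted #" + (sortedPos + 1) + " -> " + values.get(surrogate - 1));
        }
      }
    }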

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/ForwardDictionary.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/ForwardDictionary.java b/core/src/main/java/org/carbondata/core/cache/dictionary/ForwardDictionary.java
deleted file mode 100644
index 5ddd093..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/ForwardDictionary.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-/**
- * This class will be used for dictionary key and value look up
- */
-public class ForwardDictionary implements Dictionary {
-
-  /**
-   * Object which will hold the information related to this dictionary column
-   */
-  private ColumnDictionaryInfo columnDictionaryInfo;
-
-  /**
-   * @param columnDictionaryInfo
-   */
-  public ForwardDictionary(ColumnDictionaryInfo columnDictionaryInfo) {
-    this.columnDictionaryInfo = columnDictionaryInfo;
-  }
-
-  /**
-   * This method will find and return the surrogate key for a given dictionary value
-   * Applicable scenario:
-   * 1. Incremental data load : Dictionary will not be generated for existing values. Such
-   * values have to be looked up in the existing dictionary cache.
-   * 2. Filter scenarios where the surrogate key has to be found from the value.
-   *
-   * @param value dictionary value
-   * @return if found returns key else 0
-   */
-  @Override public int getSurrogateKey(String value) {
-    return columnDictionaryInfo.getSurrogateKey(value);
-  }
-
-  /**
-   * This method will find and return the surrogate key for a given dictionary value
-   * Applicable scenario:
-   * 1. Incremental data load : Dictionary will not be generated for existing values. Such
-   * values have to be looked up in the existing dictionary cache.
-   * 2. Filter scenarios where the surrogate key has to be found from the value.
-   *
-   * @param value dictionary value as byte array
-   * @return if found returns key else 0
-   */
-  @Override public int getSurrogateKey(byte[] value) {
-    return columnDictionaryInfo.getSurrogateKey(value);
-  }
-
-  /**
-   * This method will find and return the dictionary value for a given surrogate key.
-   * Applicable scenarios:
-   * 1. Query final result preparation : while converting the surrogate keys in the
-   * final result back to the original dictionary values, this method will be used
-   *
-   * @param surrogateKey a unique ID for a dictionary value
-   * @return value if found else null
-   */
-  @Override public String getDictionaryValueForKey(int surrogateKey) {
-    return columnDictionaryInfo.getDictionaryValueForKey(surrogateKey);
-  }
-
-  /**
-   * This method will find and return the sort index for a given dictionary id.
-   * Applicable scenarios:
-   * 1. Used in case of order by queries when data sorting is required
-   *
-   * @param surrogateKey a unique ID for a dictionary value
-   * @return if found returns key else 0
-   */
-  @Override public int getSortedIndex(int surrogateKey) {
-    return columnDictionaryInfo.getSortedIndex(surrogateKey);
-  }
-
-  /**
-   * This method will find and return the dictionary value from sorted index.
-   * Applicable scenarios:
-   * 1. Query final result preparation in case of order by queries:
-   * while converting the surrogate keys in the final result
-   * back to the original dictionary values, this method will be used
-   *
-   * @param sortedIndex sort index of dictionary value
-   * @return value if found else null
-   */
-  @Override public String getDictionaryValueFromSortedIndex(int sortedIndex) {
-    return columnDictionaryInfo.getDictionaryValueFromSortedIndex(sortedIndex);
-  }
-
-  /**
-   * The method returns the dictionary chunks wrapper of a column.
-   * The wrapper wraps the List<List<byte[]>> and provides an iterator to retrieve the chunk
-   * members.
-   * Applications Scenario:
-   * For preparing the column Sort info while writing the sort index file.
-   *
-   * @return
-   */
-  @Override public DictionaryChunksWrapper getDictionaryChunks() {
-    return columnDictionaryInfo.getDictionaryChunks();
-  }
-
-  /**
-   * This method will release the objects and set default value for primitive types
-   */
-  @Override public void clear() {
-    if (null != columnDictionaryInfo) {
-      columnDictionaryInfo.clear();
-      columnDictionaryInfo = null;
-    }
-  }
-
-  /**
-   * This method will find the surrogate keys for the given list of filter member values
-   * using incremental search.
-   *
-   * @param evaluateResultList filter member values to look up
-   * @param surrogates         list to which the matched surrogate keys are added
-   */
-  public void getSurrogateKeyByIncrementalSearch(List<String> evaluateResultList,
-      List<Integer> surrogates) {
-    List<byte[]> byteValuesOfFilterMembers = new ArrayList<byte[]>(evaluateResultList.size());
-    byte[] keyData = null;
-    for (int i = 0; i < evaluateResultList.size(); i++) {
-      keyData = evaluateResultList.get(i)
-          .getBytes(Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
-      byteValuesOfFilterMembers.add(keyData);
-    }
-
-    columnDictionaryInfo
-        .getIncrementalSurrogateKeyFromDictionary(byteValuesOfFilterMembers, surrogates);
-  }
-}
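
Both lookup directions are exposed through the Dictionary interface the class implements; a small
sketch against that interface, assuming an already-loaded dictionary instance:

    import org.carbondata.core.cache.dictionary.Dictionary;

    final class DictionaryLookups {
      // forward lookup: surrogate key -> original value (null if the key is absent)
      static String decode(Dictionary dictionary, int surrogateKey) {
        return dictionary.getDictionaryValueForKey(surrogateKey);
      }

      // reverse lookup: value -> surrogate key (0 if the value is not in the dictionary)
      static int encode(Dictionary dictionary, String value) {
        return dictionary.getSurrogateKey(value);
      }
    }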

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/ForwardDictionaryCache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/ForwardDictionaryCache.java b/core/src/main/java/org/carbondata/core/cache/dictionary/ForwardDictionaryCache.java
deleted file mode 100644
index bee7714..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/ForwardDictionaryCache.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.cache.CacheType;
-import org.carbondata.core.cache.CarbonLRUCache;
-import org.carbondata.core.util.CarbonUtilException;
-
-/**
- * This class implements methods to create dictionary cache which will hold
- * dictionary chunks for look up of surrogate keys and values
- */
-public class ForwardDictionaryCache<K extends DictionaryColumnUniqueIdentifier,
-                                    V extends Dictionary>
-    extends AbstractDictionaryCache<K, V> {
-
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(ForwardDictionaryCache.class.getName());
-
-  /**
-   * @param carbonStorePath
-   * @param carbonLRUCache
-   */
-  public ForwardDictionaryCache(String carbonStorePath, CarbonLRUCache carbonLRUCache) {
-    super(carbonStorePath, carbonLRUCache);
-  }
-
-  /**
-   * This method will get the value for the given key. If value does not exist
-   * for the given key, it will check and load the value.
-   *
-   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
-   *                                         tableName and columnIdentifier
-   * @return dictionary
-   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
-   */
-  @Override public Dictionary get(DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier)
-      throws CarbonUtilException {
-    return getDictionary(dictionaryColumnUniqueIdentifier);
-  }
-
-  /**
-   * This method will return a list of values for the given list of keys.
-   * For each key, this method will check and load the data if required.
-   *
-   * @param dictionaryColumnUniqueIdentifiers unique identifier which contains dbName,
-   *                                          tableName and columnIdentifier
-   * @return list of dictionary
-   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
-   */
-  @Override public List<Dictionary> getAll(
-      List<DictionaryColumnUniqueIdentifier> dictionaryColumnUniqueIdentifiers)
-      throws CarbonUtilException {
-    boolean exceptionOccurredInDictionaryLoading = false;
-    String exceptionMessage = "";
-    List<Dictionary> forwardDictionaryObjectList =
-        new ArrayList<Dictionary>(dictionaryColumnUniqueIdentifiers.size());
-    List<Future<Dictionary>> taskSubmitList =
-        new ArrayList<>(dictionaryColumnUniqueIdentifiers.size());
-    ExecutorService executorService = Executors.newFixedThreadPool(thread_pool_size);
-    for (final DictionaryColumnUniqueIdentifier uniqueIdent : dictionaryColumnUniqueIdentifiers) {
-      taskSubmitList.add(executorService.submit(new Callable<Dictionary>() {
-        @Override public Dictionary call() throws CarbonUtilException {
-          Dictionary dictionary = getDictionary(uniqueIdent);
-          return dictionary;
-        }
-      }));
-    }
-    try {
-      executorService.shutdown();
-      executorService.awaitTermination(2, TimeUnit.HOURS);
-    } catch (InterruptedException e) {
-      LOGGER.error("Error loading the dictionary: " + e.getMessage());
-    }
-    for (int i = 0; i < taskSubmitList.size(); i++) {
-      try {
-        Dictionary columnDictionary = taskSubmitList.get(i).get();
-        forwardDictionaryObjectList.add(columnDictionary);
-      } catch (Throwable e) {
-        exceptionOccurredInDictionaryLoading = true;
-        exceptionMessage = e.getMessage();
-      }
-    }
-    if (exceptionOccurredInDictionaryLoading) {
-      clearDictionary(forwardDictionaryObjectList);
-      LOGGER.error(exceptionMessage);
-      throw new CarbonUtilException(exceptionMessage);
-    }
-    return forwardDictionaryObjectList;
-  }
-
-  /**
-   * This method will return the value for the given key. It will not check and load
-   * the data for the given key
-   *
-   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
-   *                                         tableName and columnIdentifier
-   * @return
-   */
-  @Override public Dictionary getIfPresent(
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
-    Dictionary forwardDictionary = null;
-    ColumnDictionaryInfo columnDictionaryInfo = (ColumnDictionaryInfo) carbonLRUCache.get(
-        getLruCacheKey(dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId(),
-            CacheType.FORWARD_DICTIONARY));
-    if (null != columnDictionaryInfo) {
-      forwardDictionary = new ForwardDictionary(columnDictionaryInfo);
-      incrementDictionaryAccessCount(columnDictionaryInfo);
-    }
-    return forwardDictionary;
-  }
-
-  /**
-   * This method will remove the cache for a given key
-   *
-   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
-   *                                         tableName and columnIdentifier
-   */
-  @Override public void invalidate(
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
-    carbonLRUCache.remove(
-        getLruCacheKey(dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId(),
-            CacheType.FORWARD_DICTIONARY));
-  }
-
-  /**
-   * This method will get the value for the given key. If value does not exist
-   * for the given key, it will check and load the value.
-   *
-   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
-   *                                         tableName and columnIdentifier
-   * @return dictionary
-   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
-   */
-  private Dictionary getDictionary(
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier)
-      throws CarbonUtilException {
-    Dictionary forwardDictionary = null;
-    // create column dictionary info object only if dictionary and its
-    // metadata file exists for a given column identifier
-    if (!isFileExistsForGivenColumn(dictionaryColumnUniqueIdentifier)) {
-      throw new CarbonUtilException(
-          "Either dictionary or its metadata does not exist for column identifier :: "
-              + dictionaryColumnUniqueIdentifier.getColumnIdentifier());
-    }
-    String columnIdentifier = dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId();
-    ColumnDictionaryInfo columnDictionaryInfo =
-        getColumnDictionaryInfo(dictionaryColumnUniqueIdentifier, columnIdentifier);
-    // load sort index file in case of forward dictionary
-    checkAndLoadDictionaryData(dictionaryColumnUniqueIdentifier, columnDictionaryInfo,
-        getLruCacheKey(dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId(),
-            CacheType.FORWARD_DICTIONARY), true);
-    forwardDictionary = new ForwardDictionary(columnDictionaryInfo);
-    return forwardDictionary;
-  }
-
-  /**
-   * This method will check and create columnDictionaryInfo object for the given column
-   *
-   * @param dictionaryColumnUniqueIdentifier
-   * @param columnIdentifier
-   * @return
-   */
-  private ColumnDictionaryInfo getColumnDictionaryInfo(
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier, String columnIdentifier) {
-    ColumnDictionaryInfo columnDictionaryInfo = (ColumnDictionaryInfo) carbonLRUCache
-        .get(getLruCacheKey(columnIdentifier, CacheType.FORWARD_DICTIONARY));
-    if (null == columnDictionaryInfo) {
-      synchronized (dictionaryColumnUniqueIdentifier) {
-        columnDictionaryInfo = (ColumnDictionaryInfo) carbonLRUCache
-            .get(getLruCacheKey(columnIdentifier, CacheType.FORWARD_DICTIONARY));
-        if (null == columnDictionaryInfo) {
-          columnDictionaryInfo =
-              new ColumnDictionaryInfo(dictionaryColumnUniqueIdentifier.getDataType());
-        }
-      }
-    }
-    return columnDictionaryInfo;
-  }
-}
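
getColumnDictionaryInfo above uses a check / lock / re-check sequence so that concurrent callers do
not build duplicate cache entries; note that it synchronizes on the identifier argument itself,
which only serializes callers sharing that exact instance. The same idiom in a generic,
self-contained form (the cache map and loaded value are stand-ins):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class DoubleCheckedCache {
      private final Map<String, Object> cache = new ConcurrentHashMap<String, Object>();
      private final Object lock = new Object();

      Object getOrCreate(String key) {
        Object value = cache.get(key);      // first, lock-free check
        if (value == null) {
          synchronized (lock) {             // serialize creators on a shared lock
            value = cache.get(key);         // re-check under the lock
            if (value == null) {
              value = new Object();         // stand-in for loading dictionary data
              cache.put(key, value);
            }
          }
        }
        return value;
      }
    }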

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/ReverseDictionary.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/ReverseDictionary.java b/core/src/main/java/org/carbondata/core/cache/dictionary/ReverseDictionary.java
deleted file mode 100644
index a1f50a4..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/ReverseDictionary.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-/**
- * This class will be used for dictionary key and value look up
- */
-public class ReverseDictionary implements Dictionary {
-
-  /**
-   * Object which will hold the information related to this dictionary column
-   */
-  private ColumnReverseDictionaryInfo columnReverseDictionaryInfo;
-
-  /**
-   * @param columnReverseDictionaryInfo
-   */
-  public ReverseDictionary(ColumnReverseDictionaryInfo columnReverseDictionaryInfo) {
-    this.columnReverseDictionaryInfo = columnReverseDictionaryInfo;
-  }
-
-  /**
-   * This method will find and return the surrogate key for a given dictionary value
-   * Applicable scenario:
-   * 1. Incremental data load : Dictionary will not be generated for existing values. Such
-   * values have to be looked up in the existing dictionary cache.
-   * 2. Filter scenarios where the surrogate key has to be found from the value.
-   *
-   * @param value dictionary value
-   * @return if found returns key else 0
-   */
-  @Override public int getSurrogateKey(String value) {
-    return columnReverseDictionaryInfo.getSurrogateKey(value);
-  }
-
-  /**
-   * This method will find and return the surrogate key for a given dictionary value
-   * Applicable scenario:
-   * 1. Incremental data load : Dictionary will not be generated for existing values. Such
-   * values have to be looked up in the existing dictionary cache.
-   * 2. Filter scenarios where the surrogate key has to be found from the value.
-   *
-   * @param value dictionary value as byte array
-   * @return if found returns key else 0
-   */
-  @Override public int getSurrogateKey(byte[] value) {
-    return columnReverseDictionaryInfo.getSurrogateKey(value);
-  }
-
-  /**
-   * This method will find and return the dictionary value for a given surrogate key.
-   * Applicable scenarios:
-   * 1. Query final result preparation : while converting the surrogate keys in the
-   * final result back to the original dictionary values, this method will be used
-   *
-   * @param surrogateKey a unique ID for a dictionary value
-   * @return value if found else null
-   */
-  @Override public String getDictionaryValueForKey(int surrogateKey) {
-    return columnReverseDictionaryInfo.getDictionaryValueForKey(surrogateKey);
-  }
-
-  /**
-   * This method will find and return the sort index for a given dictionary id.
-   * Applicable scenarios:
-   * 1. Used in case of order by queries when data sorting is required
-   *
-   * @param surrogateKey a unique ID for a dictionary value
-   * @return if found returns key else 0
-   */
-  @Override public int getSortedIndex(int surrogateKey) {
-    return columnReverseDictionaryInfo.getSortedIndex(surrogateKey);
-  }
-
-  /**
-   * This method will find and return the dictionary value from sorted index.
-   * Applicable scenarios:
-   * 1. Query final result preparation in case of order by queries:
-   * while converting the surrogate keys in the final result
-   * back to the original dictionary values, this method will be used
-   *
-   * @param sortedIndex sort index of dictionary value
-   * @return value if found else null
-   */
-  @Override public String getDictionaryValueFromSortedIndex(int sortedIndex) {
-    return columnReverseDictionaryInfo.getDictionaryValueFromSortedIndex(sortedIndex);
-  }
-
-  /**
-   * The method returns the dictionary chunks wrapper of a column.
-   * The wrapper wraps the List<List<byte[]>> and provides an iterator to retrieve the chunk
-   * members.
-   * Applications Scenario:
-   * For preparing the column Sort info while writing the sort index file.
-   *
-   * @return
-   */
-  @Override public DictionaryChunksWrapper getDictionaryChunks() {
-    return columnReverseDictionaryInfo.getDictionaryChunks();
-  }
-
-  /**
-   * This method will release the objects and set default value for primitive types
-   */
-  @Override public void clear() {
-    if (null != columnReverseDictionaryInfo) {
-      columnReverseDictionaryInfo.clear();
-      columnReverseDictionaryInfo = null;
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/ReverseDictionaryCache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/ReverseDictionaryCache.java b/core/src/main/java/org/carbondata/core/cache/dictionary/ReverseDictionaryCache.java
deleted file mode 100644
index 6e49183..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/ReverseDictionaryCache.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.cache.CacheType;
-import org.carbondata.core.cache.CarbonLRUCache;
-import org.carbondata.core.util.CarbonUtilException;
-
-/**
- * This class implements methods to create dictionary cache which will hold
- * dictionary chunks for look up of surrogate keys and values
- */
-public class ReverseDictionaryCache<K extends DictionaryColumnUniqueIdentifier,
-    V extends Dictionary>
-    extends AbstractDictionaryCache<K, V> {
-
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(ReverseDictionaryCache.class.getName());
-
-  /**
-   * @param carbonStorePath
-   * @param carbonLRUCache
-   */
-  public ReverseDictionaryCache(String carbonStorePath, CarbonLRUCache carbonLRUCache) {
-    super(carbonStorePath, carbonLRUCache);
-  }
-
-  /**
-   * This method will get the value for the given key. If value does not exist
-   * for the given key, it will check and load the value.
-   *
-   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
-   *                                         tableName and columnIdentifier
-   * @return dictionary
-   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
-   */
-  @Override public Dictionary get(DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier)
-      throws CarbonUtilException {
-    return getDictionary(dictionaryColumnUniqueIdentifier);
-  }
-
-  /**
-   * This method will return a list of values for the given list of keys.
-   * For each key, this method will check and load the data if required.
-   *
-   * @param dictionaryColumnUniqueIdentifiers unique identifier which contains dbName,
-   *                                          tableName and columnIdentifier
-   * @return list of dictionary
-   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
-   */
-  @Override public List<Dictionary> getAll(
-      List<DictionaryColumnUniqueIdentifier> dictionaryColumnUniqueIdentifiers)
-      throws CarbonUtilException {
-    boolean exceptionOccurredInDictionaryLoading = false;
-    String exceptionMessage = "";
-    List<Dictionary> reverseDictionaryObjectList =
-        new ArrayList<Dictionary>(dictionaryColumnUniqueIdentifiers.size());
-    List<Future<Dictionary>> taskSubmitList =
-        new ArrayList<>(dictionaryColumnUniqueIdentifiers.size());
-    ExecutorService executorService = Executors.newFixedThreadPool(thread_pool_size);
-    for (final DictionaryColumnUniqueIdentifier uniqueIdent : dictionaryColumnUniqueIdentifiers) {
-      taskSubmitList.add(executorService.submit(new Callable<Dictionary>() {
-        @Override public Dictionary call() throws CarbonUtilException {
-          Dictionary dictionary = getDictionary(uniqueIdent);
-          return dictionary;
-        }
-      }));
-    }
-    try {
-      executorService.shutdown();
-      executorService.awaitTermination(2, TimeUnit.HOURS);
-    } catch (InterruptedException e) {
-      LOGGER.error("Error loading the dictionary: " + e.getMessage());
-    }
-    for (int i = 0; i < taskSubmitList.size(); i++) {
-      try {
-        Dictionary columnDictionary = taskSubmitList.get(i).get();
-        reverseDictionaryObjectList.add(columnDictionary);
-      } catch (Throwable e) {
-        exceptionOccurredInDictionaryLoading = true;
-        exceptionMessage = e.getMessage();
-      }
-    }
-    if (exceptionOccurredInDictionaryLoading) {
-      clearDictionary(reverseDictionaryObjectList);
-      LOGGER.error(exceptionMessage);
-      throw new CarbonUtilException(exceptionMessage);
-    }
-    return reverseDictionaryObjectList;
-  }
-
-  /**
-   * This method will return the value for the given key. It will not check and load
-   * the data for the given key
-   *
-   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
-   *                                         tableName and columnIdentifier
-   * @return
-   */
-  @Override public Dictionary getIfPresent(
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
-    Dictionary reverseDictionary = null;
-    ColumnReverseDictionaryInfo columnReverseDictionaryInfo =
-        (ColumnReverseDictionaryInfo) carbonLRUCache.get(
-            getLruCacheKey(dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId(),
-                CacheType.REVERSE_DICTIONARY));
-    if (null != columnReverseDictionaryInfo) {
-      reverseDictionary = new ReverseDictionary(columnReverseDictionaryInfo);
-      incrementDictionaryAccessCount(columnReverseDictionaryInfo);
-    }
-    return reverseDictionary;
-  }
-
-  /**
-   * This method will remove the cache for a given key
-   *
-   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
-   *                                         tableName and columnIdentifier
-   */
-  @Override public void invalidate(
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier) {
-    carbonLRUCache.remove(
-        getLruCacheKey(dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId(),
-            CacheType.REVERSE_DICTIONARY));
-  }
-
-  /**
-   * This method will get the value for the given key. If value does not exist
-   * for the given key, it will check and load the value.
-   *
-   * @param dictionaryColumnUniqueIdentifier unique identifier which contains dbName,
-   *                                         tableName and columnIdentifier
-   * @return dictionary
-   * @throws CarbonUtilException in case memory is not sufficient to load dictionary into memory
-   */
-  private Dictionary getDictionary(
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier)
-      throws CarbonUtilException {
-    Dictionary reverseDictionary = null;
-    // create column dictionary info object only if dictionary and its
-    // metadata file exists for a given column identifier
-    if (!isFileExistsForGivenColumn(dictionaryColumnUniqueIdentifier)) {
-      throw new CarbonUtilException(
-          "Either dictionary or its metadata does not exist for column identifier :: "
-              + dictionaryColumnUniqueIdentifier.getColumnIdentifier());
-    }
-    String columnIdentifier = dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId();
-    ColumnReverseDictionaryInfo columnReverseDictionaryInfo =
-        getColumnReverseDictionaryInfo(dictionaryColumnUniqueIdentifier, columnIdentifier);
-    // do not load sort index file for reverse dictionary
-    checkAndLoadDictionaryData(dictionaryColumnUniqueIdentifier, columnReverseDictionaryInfo,
-        getLruCacheKey(dictionaryColumnUniqueIdentifier.getColumnIdentifier().getColumnId(),
-            CacheType.REVERSE_DICTIONARY), false);
-    reverseDictionary = new ReverseDictionary(columnReverseDictionaryInfo);
-    return reverseDictionary;
-  }
-
-  /**
-   * This method will check and create columnReverseDictionaryInfo object for the given column
-   *
-   * @param dictionaryColumnUniqueIdentifier
-   * @param columnIdentifier
-   * @return
-   */
-  private ColumnReverseDictionaryInfo getColumnReverseDictionaryInfo(
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier, String columnIdentifier) {
-    ColumnReverseDictionaryInfo columnReverseDictionaryInfo =
-        (ColumnReverseDictionaryInfo) carbonLRUCache
-            .get(getLruCacheKey(columnIdentifier, CacheType.REVERSE_DICTIONARY));
-    if (null == columnReverseDictionaryInfo) {
-      synchronized (dictionaryColumnUniqueIdentifier) {
-        columnReverseDictionaryInfo = (ColumnReverseDictionaryInfo) carbonLRUCache
-            .get(getLruCacheKey(columnIdentifier, CacheType.REVERSE_DICTIONARY));
-        if (null == columnReverseDictionaryInfo) {
-          columnReverseDictionaryInfo = new ColumnReverseDictionaryInfo();
-        }
-      }
-    }
-    return columnReverseDictionaryInfo;
-  }
-}
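
getAll above fans each identifier out to a fixed thread pool and then drains the Futures, failing
the whole batch if any single load failed. A condensed, self-contained sketch of that fan-out
pattern (the pool size, key type, and loader body are placeholders):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    final class ParallelLoader {
      static List<String> loadAll(List<String> keys) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4); // placeholder pool size
        List<Future<String>> futures = new ArrayList<Future<String>>(keys.size());
        for (final String key : keys) {
          futures.add(pool.submit(new Callable<String>() {
            @Override public String call() {
              return "loaded:" + key;       // stand-in for loading one dictionary
            }
          }));
        }
        pool.shutdown();                    // no new tasks; submitted ones run to completion
        pool.awaitTermination(1, TimeUnit.MINUTES);
        List<String> results = new ArrayList<String>(keys.size());
        for (Future<String> f : futures) {
          results.add(f.get());             // rethrows any loading failure
        }
        return results;
      }
    }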



[07/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/ExpressionResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/ExpressionResult.java b/core/src/main/java/org/carbondata/scan/expression/ExpressionResult.java
deleted file mode 100644
index 0ad39f6..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/ExpressionResult.java
+++ /dev/null
@@ -1,472 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression;
-
-import java.math.BigDecimal;
-import java.sql.Timestamp;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.List;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.util.CarbonProperties;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-
-public class ExpressionResult implements Comparable<ExpressionResult> {
-
-  private static final long serialVersionUID = 1L;
-  protected DataType dataType;
-
-  protected Object value;
-
-  private List<ExpressionResult> expressionResults;
-
-  public ExpressionResult(DataType dataType, Object value) {
-    this.dataType = dataType;
-    this.value = value;
-  }
-
-  public ExpressionResult(List<ExpressionResult> expressionResults) {
-    this.expressionResults = expressionResults;
-  }
-
-  public void set(DataType dataType, Object value) {
-    this.dataType = dataType;
-    this.value = value;
-    this.expressionResults = null;
-  }
-
-  public DataType getDataType() {
-    return dataType;
-  }
-
-  //CHECKSTYLE:OFF Approval No:Approval-V1R2C10_009
-  public Integer getInt() throws FilterIllegalMemberException {
-    if (value == null) {
-      return null;
-    }
-    try {
-      switch (this.getDataType()) {
-        case STRING:
-          try {
-            return Integer.parseInt(value.toString());
-          } catch (NumberFormatException e) {
-            throw new FilterIllegalMemberException(e);
-          }
-        case SHORT:
-          return ((Short) value).intValue();
-        case INT:
-        case DOUBLE:
-          if (value instanceof Double) {
-            return ((Double) value).intValue();
-          }
-          return (Integer) value;
-        case TIMESTAMP:
-          if (value instanceof Timestamp) {
-            return (int) (((Timestamp) value).getTime() % 1000);
-          } else {
-            return (Integer) value;
-          }
-        default:
-          throw new FilterIllegalMemberException(
-              "Cannot convert" + this.getDataType().name() + " to integer type value");
-      }
-
-    } catch (ClassCastException e) {
-      throw new FilterIllegalMemberException(
-          "Cannot convert" + this.getDataType().name() + " to Integer type value");
-    }
-  }
-
-  public Short getShort() throws FilterIllegalMemberException {
-    if (value == null) {
-      return null;
-    }
-    try {
-      switch (this.getDataType()) {
-        case STRING:
-          try {
-            return Short.parseShort(value.toString());
-          } catch (NumberFormatException e) {
-            throw new FilterIllegalMemberException(e);
-          }
-        case SHORT:
-        case INT:
-        case DOUBLE:
-
-          if (value instanceof Double) {
-            return ((Double) value).shortValue();
-          } else if (value instanceof Integer) {
-            return ((Integer) value).shortValue();
-          }
-          return (Short) value;
-
-        case TIMESTAMP:
-
-          if (value instanceof Timestamp) {
-            return (short) (((Timestamp) value).getTime() % 1000);
-          } else {
-            return (Short) value;
-          }
-
-        default:
-          throw new FilterIllegalMemberException(
-              "Cannot convert" + this.getDataType().name() + " to integer type value");
-      }
-
-    } catch (ClassCastException e) {
-      throw new FilterIllegalMemberException(
-          "Cannot convert" + this.getDataType().name() + " to Integer type value");
-    }
-  }
-
-  public String getString() throws FilterIllegalMemberException {
-    if (value == null) {
-      return null;
-    }
-    try {
-      switch (this.getDataType()) {
-        case TIMESTAMP:
-          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
-              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
-          if (value instanceof Timestamp) {
-            return parser.format((Timestamp) value);
-          } else {
-            return parser.format(new Timestamp((long) value / 1000));
-          }
-
-        default:
-          return value.toString();
-      }
-    } catch (Exception e) {
-      throw new FilterIllegalMemberException(
-          "Cannot convert" + this.getDataType().name() + " to String type value");
-    }
-  }
-
-  public Double getDouble() throws FilterIllegalMemberException {
-    if (value == null) {
-      return null;
-    }
-    try {
-      switch (this.getDataType()) {
-        case STRING:
-          try {
-            return Double.parseDouble(value.toString());
-          } catch (NumberFormatException e) {
-            throw new FilterIllegalMemberException(e);
-          }
-        case SHORT:
-          return ((Short) value).doubleValue();
-        case INT:
-          return ((Integer) value).doubleValue();
-        case LONG:
-          return ((Long) value).doubleValue();
-        case DOUBLE:
-          return (Double) value;
-        case TIMESTAMP:
-          if (value instanceof Timestamp) {
-            return (double) ((Timestamp) value).getTime() * 1000;
-          } else {
-            return (Double) (value);
-          }
-        default:
-          throw new FilterIllegalMemberException(
-              "Cannot convert" + this.getDataType().name() + " to double type value");
-      }
-    } catch (ClassCastException e) {
-      throw new FilterIllegalMemberException(
-          "Cannot convert" + this.getDataType().name() + " to Double type value");
-    }
-  }
-  //CHECKSTYLE:ON
-
-  public Long getLong() throws FilterIllegalMemberException {
-    if (value == null) {
-      return null;
-    }
-    try {
-      switch (this.getDataType()) {
-        case STRING:
-          try {
-            return Long.parseLong(value.toString());
-          } catch (NumberFormatException e) {
-            throw new FilterIllegalMemberException(e);
-          }
-        case SHORT:
-          return ((Short) value).longValue();
-        case INT:
-          return ((Integer) value).longValue();
-        case LONG:
-          return (Long) value;
-        case DOUBLE:
-          return ((Double) value).longValue();
-        case TIMESTAMP:
-          if (value instanceof Timestamp) {
-            return 1000 * ((Timestamp) value).getTime();
-          } else {
-            return (Long) value;
-          }
-        default:
-          throw new FilterIllegalMemberException(
-              "Cannot convert" + this.getDataType().name() + " to Long type value");
-      }
-    } catch (ClassCastException e) {
-      throw new FilterIllegalMemberException(
-          "Cannot convert" + this.getDataType().name() + " to Long type value");
-    }
-
-  }
-
-  //Add to judge for BigDecimal
-  public BigDecimal getDecimal() throws FilterIllegalMemberException {
-    if (value == null) {
-      return null;
-    }
-    try {
-      switch (this.getDataType()) {
-        case STRING:
-          try {
-            return new BigDecimal(value.toString());
-          } catch (NumberFormatException e) {
-            throw new FilterIllegalMemberException(e);
-          }
-        case SHORT:
-          return new BigDecimal((short) value);
-        case INT:
-          return new BigDecimal((int) value);
-        case LONG:
-          return new BigDecimal((long) value);
-        case DOUBLE:
-          return new BigDecimal(value.toString());
-        case DECIMAL:
-          return new BigDecimal(value.toString());
-        case TIMESTAMP:
-          if (value instanceof Timestamp) {
-            return new BigDecimal(1000 * ((Timestamp) value).getTime());
-          } else {
-            return new BigDecimal((long) value);
-          }
-        default:
-          throw new FilterIllegalMemberException(
-              "Cannot convert" + this.getDataType().name() + " to Long type value");
-      }
-    } catch (ClassCastException e) {
-      throw new FilterIllegalMemberException(
-          "Cannot convert" + this.getDataType().name() + " to Long type value");
-    }
-
-  }
-
-  public Long getTime() throws FilterIllegalMemberException {
-    if (value == null) {
-      return null;
-    }
-    try {
-      switch (this.getDataType()) {
-        case STRING:
-          // Currently the query engine layer only supports the yyyy-MM-dd HH:mm:ss date format,
-          // no matter in which format the data is stored. So while retrieving the direct
-          // surrogate value for a filter member, it should first be converted to a date in the
-          // above format, and the timestamp is then retrieved from it.
-          SimpleDateFormat parser =
-              new SimpleDateFormat(CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT);
-          Date dateToStr;
-          try {
-            dateToStr = parser.parse(value.toString());
-            return dateToStr.getTime() * 1000;
-          } catch (ParseException e) {
-            throw new FilterIllegalMemberException(
-                "Cannot convert" + this.getDataType().name() + " to Time/Long type value");
-          }
-        case SHORT:
-          return ((Short) value).longValue();
-        case INT:
-          return ((Integer) value).longValue();
-        case LONG:
-          return (Long) value;
-        case DOUBLE:
-          return ((Double) value).longValue();
-        case TIMESTAMP:
-          if (value instanceof Timestamp) {
-            return ((Timestamp) value).getTime() * 1000;
-          } else {
-            return (Long) value;
-          }
-        default:
-          throw new FilterIllegalMemberException(
-              "Cannot convert" + this.getDataType().name() + " to Time/Long type value");
-      }
-    } catch (ClassCastException e) {
-      throw new FilterIllegalMemberException(
-          "Cannot convert" + this.getDataType().name() + " to Time/Long type value");
-    }
-
-  }
-
-  public Boolean getBoolean() throws FilterIllegalMemberException {
-    if (value == null) {
-      return null;
-    }
-    try {
-      switch (this.getDataType()) {
-        case STRING:
-          try {
-            return Boolean.parseBoolean(value.toString());
-          } catch (NumberFormatException e) {
-            throw new FilterIllegalMemberException(e);
-          }
-
-        case BOOLEAN:
-          return Boolean.parseBoolean(value.toString());
-
-        default:
-          throw new FilterIllegalMemberException(
-              "Cannot convert" + this.getDataType().name() + " to boolean type value");
-      }
-    } catch (ClassCastException e) {
-      throw new FilterIllegalMemberException(
-          "Cannot convert" + this.getDataType().name() + " to Boolean type value");
-    }
-  }
-
-  public List<ExpressionResult> getList() {
-    if (null == expressionResults) {
-      List<ExpressionResult> a = new ArrayList<ExpressionResult>(20);
-      a.add(new ExpressionResult(dataType, value));
-      return a;
-    } else {
-      return expressionResults;
-    }
-  }
-
-  public List<String> getListAsString() throws FilterIllegalMemberException {
-    List<String> evaluateResultListFinal = new ArrayList<String>(20);
-    List<ExpressionResult> evaluateResultList = getList();
-    for (ExpressionResult result : evaluateResultList) {
-      if (result.getString() == null) {
-        evaluateResultListFinal.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL);
-        continue;
-      }
-      evaluateResultListFinal.add(result.getString());
-    }
-    return evaluateResultListFinal;
-  }
-
-  @Override public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    if (null != expressionResults) {
-      result = prime * result + expressionResults.hashCode();
-    } else if (null != value) {
-      result = prime * result + value.toString().hashCode();
-    } else {
-      result = prime * result + "".hashCode();
-    }
-
-    return result;
-  }
-
-  @Override public boolean equals(Object obj) {
-    if (!(obj instanceof ExpressionResult)) {
-      return false;
-    }
-    if (this == obj) {
-      return true;
-    }
-    if (getClass() != obj.getClass()) {
-      return false;
-    }
-    ExpressionResult objToCompare = (ExpressionResult) obj;
-    boolean result = false;
-    if (this.value == objToCompare.value) {
-      return true;
-    }
-    try {
-      switch (this.getDataType()) {
-        case STRING:
-          result = this.getString().equals(objToCompare.getString());
-          break;
-        case SHORT:
-          result = this.getShort().equals(objToCompare.getShort());
-          break;
-        case INT:
-          result = this.getInt().equals(objToCompare.getInt());
-          break;
-        case LONG:
-        case TIMESTAMP:
-          result = this.getLong().equals(objToCompare.getLong());
-          break;
-        case DOUBLE:
-          result = this.getDouble().equals(objToCompare.getDouble());
-          break;
-        case DECIMAL:
-          result = this.getDecimal().equals(objToCompare.getDecimal());
-          break;
-        default:
-          break;
-      }
-    } catch (FilterIllegalMemberException ex) {
-      return false;
-    }
-
-    return result;
-  }
-
-  public boolean isNull() {
-    return value == null;
-  }
-
-  @Override public int compareTo(ExpressionResult o) {
-    try {
-      switch (o.dataType) {
-        case SHORT:
-        case INT:
-        case LONG:
-        case DOUBLE:
-          Double d1 = this.getDouble();
-          Double d2 = o.getDouble();
-          return d1.compareTo(d2);
-        case DECIMAL:
-          java.math.BigDecimal val1 = this.getDecimal();
-          java.math.BigDecimal val2 = o.getDecimal();
-          return val1.compareTo(val2);
-        case TIMESTAMP:
-          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
-              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
-          Date date1 = null;
-          Date date2 = null;
-          date1 = parser.parse(this.getString());
-          date2 = parser.parse(o.getString());
-          return date1.compareTo(date2);
-        case STRING:
-        default:
-          return this.getString().compareTo(o.getString());
-      }
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-}
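
A brief usage sketch of the coercion getters above (ExpressionResult, DataType, and
FilterIllegalMemberException are the types from this commit; the sample value is invented):

    ExpressionResult result = new ExpressionResult(DataType.STRING, "42");
    try {
      Integer asInt = result.getInt();       // parses "42" -> 42
      Double asDouble = result.getDouble();  // parses "42" -> 42.0
    } catch (FilterIllegalMemberException e) {
      // thrown when the stored value cannot be coerced to the requested type
    }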

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/LeafExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/LeafExpression.java b/core/src/main/java/org/carbondata/scan/expression/LeafExpression.java
deleted file mode 100644
index 2392910..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/LeafExpression.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression;
-
-public abstract class LeafExpression extends Expression {
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/LiteralExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/LiteralExpression.java b/core/src/main/java/org/carbondata/scan/expression/LiteralExpression.java
deleted file mode 100644
index cd48c4c..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/LiteralExpression.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class LiteralExpression extends LeafExpression {
-
-  private static final long serialVersionUID = 1L;
-  private Object value;
-  private DataType dataType;
-
-  public LiteralExpression(Object value, DataType dataType) {
-    this.value = value;
-    this.dataType = dataType;
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value) {
-    return new ExpressionResult(dataType, this.value);
-  }
-
-  public ExpressionResult getExpressionResult() {
-    return new ExpressionResult(dataType, this.value);
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.LITERAL;
-  }
-
-  @Override public String getString() {
-    return "LiteralExpression(" + value + ')';
-  }
-
-  /**
-   * Returns the data type of the wrapped literal value.
-   *
-   * @return the literal's DataType
-   */
-  public DataType getLiteralExpDataType() {
-    return dataType;
-  }
-
-}
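
A literal ignores the incoming row, so evaluate() may be called with a null RowIntf.
A minimal usage sketch, assuming getString() renders STRING results as the wrapped
value:

    import org.carbondata.core.carbon.metadata.datatype.DataType;
    import org.carbondata.scan.expression.ExpressionResult;
    import org.carbondata.scan.expression.LiteralExpression;

    public class LiteralDemo {
      public static void main(String[] args) throws Exception {
        LiteralExpression lit = new LiteralExpression("carbon", DataType.STRING);
        // the row argument is unused for literals
        ExpressionResult res = lit.evaluate(null);
        System.out.println(res.getString());  // expected: carbon
      }
    }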

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/UnaryExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/UnaryExpression.java b/core/src/main/java/org/carbondata/scan/expression/UnaryExpression.java
deleted file mode 100644
index 0449b66..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/UnaryExpression.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression;
-
-public abstract class UnaryExpression extends Expression {
-
-  private static final long serialVersionUID = 1L;
-  protected Expression child;
-
-  public UnaryExpression(Expression child) {
-    this.child = child;
-    children.add(child);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/UnknownExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/UnknownExpression.java b/core/src/main/java/org/carbondata/scan/expression/UnknownExpression.java
deleted file mode 100644
index 42624b1..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/UnknownExpression.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression;
-
-import java.util.List;
-
-public abstract class UnknownExpression extends Expression {
-
-  public abstract List<ColumnExpression> getAllColumnList();
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/arithmetic/AddExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/arithmetic/AddExpression.java b/core/src/main/java/org/carbondata/scan/expression/arithmetic/AddExpression.java
deleted file mode 100644
index 589e2c3..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/arithmetic/AddExpression.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.arithmetic;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class AddExpression extends BinaryArithmeticExpression {
-  private static final long serialVersionUID = 7999436055420911612L;
-
-  public AddExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult addExprLeftRes = left.evaluate(value);
-    ExpressionResult addExprRightRes = right.evaluate(value);
-    ExpressionResult val1 = addExprLeftRes;
-    ExpressionResult val2 = addExprRightRes;
-    if (addExprLeftRes.isNull() || addExprRightRes.isNull()) {
-      addExprLeftRes.set(addExprLeftRes.getDataType(), null);
-      return addExprLeftRes;
-    }
-
-    if (addExprLeftRes.getDataType() != addExprRightRes.getDataType()) {
-      if (addExprLeftRes.getDataType().getPresedenceOrder() < addExprRightRes.getDataType()
-          .getPresedenceOrder()) {
-        val2 = addExprLeftRes;
-        val1 = addExprRightRes;
-      }
-    }
-    switch (val1.getDataType()) {
-      case STRING:
-      case DOUBLE:
-        addExprRightRes.set(DataType.DOUBLE, val1.getDouble() + val2.getDouble());
-        break;
-      case SHORT:
-        addExprRightRes.set(DataType.SHORT, val1.getShort() + val2.getShort());
-        break;
-      case INT:
-        addExprRightRes.set(DataType.INT, val1.getInt() + val2.getInt());
-        break;
-      case LONG:
-        addExprRightRes.set(DataType.LONG, val1.getLong() + val2.getLong());
-        break;
-      case DECIMAL:
-        addExprRightRes.set(DataType.DECIMAL, val1.getDecimal().add(val2.getDecimal()));
-        break;
-      default:
-        throw new FilterUnsupportedException(
-            "Incompatible datatype for applying Add Expression Filter " + val1.getDataType());
-    }
-    return addExprRightRes;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.ADD;
-  }
-
-  @Override public String getString() {
-    return "Add(" + left.getString() + ',' + right.getString() + ',';
-  }
-}
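
When the operand types differ, evaluate() above promotes the result to whichever
side has the higher precedence order, so INT + DOUBLE yields DOUBLE. A minimal
sketch of that promotion, assuming DOUBLE outranks INT in getPresedenceOrder() as
the switch above implies:

    import org.carbondata.core.carbon.metadata.datatype.DataType;
    import org.carbondata.scan.expression.LiteralExpression;
    import org.carbondata.scan.expression.arithmetic.AddExpression;

    public class AddPromotionDemo {
      public static void main(String[] args) throws Exception {
        AddExpression add = new AddExpression(
            new LiteralExpression(3, DataType.INT),
            new LiteralExpression(0.5, DataType.DOUBLE));
        // literals ignore the row, so null is fine here
        System.out.println(add.evaluate(null).getDouble());  // expected: 3.5
      }
    }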

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/arithmetic/BinaryArithmeticExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/arithmetic/BinaryArithmeticExpression.java b/core/src/main/java/org/carbondata/scan/expression/arithmetic/BinaryArithmeticExpression.java
deleted file mode 100644
index 9c109f7..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/arithmetic/BinaryArithmeticExpression.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.arithmetic;
-
-import org.carbondata.scan.expression.BinaryExpression;
-import org.carbondata.scan.expression.Expression;
-
-public abstract class BinaryArithmeticExpression extends BinaryExpression {
-
-  private static final long serialVersionUID = 1L;
-
-  public BinaryArithmeticExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/arithmetic/DivideExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/arithmetic/DivideExpression.java b/core/src/main/java/org/carbondata/scan/expression/arithmetic/DivideExpression.java
deleted file mode 100644
index e41bee4..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/arithmetic/DivideExpression.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.arithmetic;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class DivideExpression extends BinaryArithmeticExpression {
-  private static final long serialVersionUID = -7269266926782365612L;
-
-  public DivideExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult divideExprLeftRes = left.evaluate(value);
-    ExpressionResult divideExprRightRes = right.evaluate(value);
-    ExpressionResult val1 = divideExprLeftRes;
-    ExpressionResult val2 = divideExprRightRes;
-    if (divideExprLeftRes.isNull() || divideExprRightRes.isNull()) {
-      divideExprLeftRes.set(divideExprLeftRes.getDataType(), null);
-      return divideExprLeftRes;
-    }
-    if (divideExprLeftRes.getDataType() != divideExprRightRes.getDataType()) {
-      if (divideExprLeftRes.getDataType().getPresedenceOrder() < divideExprRightRes.getDataType()
-          .getPresedenceOrder()) {
-        val2 = divideExprLeftRes;
-        val1 = divideExprRightRes;
-      }
-    }
-    switch (val1.getDataType()) {
-      case STRING:
-      case DOUBLE:
-        divideExprRightRes.set(DataType.DOUBLE, val1.getDouble() / val2.getDouble());
-        break;
-      case SHORT:
-        divideExprRightRes.set(DataType.SHORT, val1.getShort() / val2.getShort());
-        break;
-      case INT:
-        divideExprRightRes.set(DataType.INT, val1.getInt() / val2.getInt());
-        break;
-      case LONG:
-        divideExprRightRes.set(DataType.LONG, val1.getLong() / val2.getLong());
-        break;
-      case DECIMAL:
-        divideExprRightRes.set(DataType.DECIMAL, val1.getDecimal().divide(val2.getDecimal()));
-        break;
-      default:
-        throw new FilterUnsupportedException(
-            "Incompatible datatype for applying Add Expression Filter " + divideExprLeftRes
-                .getDataType());
-    }
-    return divideExprRightRes;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.DIVIDE;
-  }
-
-  @Override public String getString() {
-    return "Divide(" + left.getString() + ',' + right.getString() + ')';
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/arithmetic/MultiplyExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/arithmetic/MultiplyExpression.java b/core/src/main/java/org/carbondata/scan/expression/arithmetic/MultiplyExpression.java
deleted file mode 100644
index 7c790d8..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/arithmetic/MultiplyExpression.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.arithmetic;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class MultiplyExpression extends BinaryArithmeticExpression {
-  private static final long serialVersionUID = 1L;
-
-  public MultiplyExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult multiplyExprLeftRes = left.evaluate(value);
-    ExpressionResult multiplyExprRightRes = right.evaluate(value);
-    ExpressionResult val1 = multiplyExprLeftRes;
-    ExpressionResult val2 = multiplyExprRightRes;
-    if (multiplyExprLeftRes.isNull() || multiplyExprRightRes.isNull()) {
-      multiplyExprLeftRes.set(multiplyExprLeftRes.getDataType(), null);
-      return multiplyExprLeftRes;
-    }
-
-    if (multiplyExprLeftRes.getDataType() != multiplyExprRightRes.getDataType()) {
-      if (multiplyExprLeftRes.getDataType().getPresedenceOrder() < multiplyExprRightRes
-          .getDataType().getPresedenceOrder()) {
-        val2 = multiplyExprLeftRes;
-        val1 = multiplyExprRightRes;
-      }
-    }
-    switch (val1.getDataType()) {
-      case STRING:
-      case DOUBLE:
-        multiplyExprRightRes.set(DataType.DOUBLE, val1.getDouble() * val2.getDouble());
-        break;
-      case SHORT:
-        multiplyExprRightRes.set(DataType.SHORT, val1.getShort() * val2.getShort());
-        break;
-      case INT:
-        multiplyExprRightRes.set(DataType.INT, val1.getInt() * val2.getInt());
-        break;
-      case LONG:
-        multiplyExprRightRes.set(DataType.LONG, val1.getLong() * val2.getLong());
-        break;
-      case DECIMAL:
-        multiplyExprRightRes.set(DataType.DECIMAL, val1.getDecimal().multiply(val2.getDecimal()));
-        break;
-      default:
-        throw new FilterUnsupportedException(
-            "Incompatible datatype for applying Add Expression Filter " + multiplyExprLeftRes
-                .getDataType());
-    }
-    return multiplyExprRightRes;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.MULTIPLY;
-  }
-
-  @Override public String getString() {
-    return "Substract(" + left.getString() + ',' + right.getString() + ')';
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/arithmetic/SubstractExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/arithmetic/SubstractExpression.java b/core/src/main/java/org/carbondata/scan/expression/arithmetic/SubstractExpression.java
deleted file mode 100644
index 682b725..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/arithmetic/SubstractExpression.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.arithmetic;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class SubstractExpression extends BinaryArithmeticExpression {
-
-  private static final long serialVersionUID = -8304726440185363102L;
-
-  public SubstractExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult subtractExprLeftRes = left.evaluate(value);
-    ExpressionResult subtractExprRightRes = right.evaluate(value);
-    ExpressionResult val1 = subtractExprLeftRes;
-    ExpressionResult val2 = subtractExprRightRes;
-    if (subtractExprLeftRes.isNull() || subtractExprRightRes.isNull()) {
-      subtractExprLeftRes.set(subtractExprLeftRes.getDataType(), null);
-      return subtractExprLeftRes;
-    }
-    if (subtractExprLeftRes.getDataType() != subtractExprRightRes.getDataType()) {
-      if (subtractExprLeftRes.getDataType().getPresedenceOrder() < subtractExprRightRes
-          .getDataType().getPresedenceOrder()) {
-        val2 = subtractExprLeftRes;
-        val1 = subtractExprRightRes;
-      }
-    }
-    switch (val1.getDataType()) {
-      case STRING:
-      case DOUBLE:
-        subtractExprRightRes.set(DataType.DOUBLE, val1.getDouble() - val2.getDouble());
-        break;
-      case SHORT:
-        subtractExprRightRes.set(DataType.SHORT, val1.getShort() - val2.getShort());
-        break;
-      case INT:
-        subtractExprRightRes.set(DataType.INT, val1.getInt() - val2.getInt());
-        break;
-      case LONG:
-        subtractExprRightRes.set(DataType.LONG, val1.getLong() - val2.getLong());
-        break;
-      case DECIMAL:
-        subtractExprRightRes
-            .set(DataType.DECIMAL, val1.getDecimal().subtract(val2.getDecimal()));
-        break;
-      default:
-        throw new FilterUnsupportedException(
-            "Incompatible datatype for applying Add Expression Filter " + subtractExprLeftRes
-                .getDataType());
-    }
-    return subtractExprRightRes;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.SUBSTRACT;
-  }
-
-  @Override public String getString() {
-    return "Substract(" + left.getString() + ',' + right.getString() + ')';
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/conditional/BinaryConditionalExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/conditional/BinaryConditionalExpression.java b/core/src/main/java/org/carbondata/scan/expression/conditional/BinaryConditionalExpression.java
deleted file mode 100644
index 0c74ebf..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/conditional/BinaryConditionalExpression.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.conditional;
-
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.logical.BinaryLogicalExpression;
-
-public abstract class BinaryConditionalExpression extends BinaryLogicalExpression
-    implements ConditionalExpression {
-
-  private static final long serialVersionUID = 1L;
-
-  public BinaryConditionalExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/conditional/ConditionalExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/conditional/ConditionalExpression.java b/core/src/main/java/org/carbondata/scan/expression/conditional/ConditionalExpression.java
deleted file mode 100644
index d9ed78f..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/conditional/ConditionalExpression.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.conditional;
-
-import java.util.List;
-
-import org.carbondata.scan.expression.ColumnExpression;
-import org.carbondata.scan.expression.ExpressionResult;
-
-public interface ConditionalExpression {
-
-  // Returns the column information involved in the expression by
-  // traversing the expression tree
-  List<ColumnExpression> getColumnList();
-
-  boolean isSingleDimension();
-
-  List<ExpressionResult> getLiterals();
-
-  /**
-   * Returns whether the columns involved are direct dictionary encoded.
-   *
-   * @return true for direct dictionary columns, false otherwise
-   */
-  boolean isDirectDictionaryColumns();
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/conditional/EqualToExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/conditional/EqualToExpression.java b/core/src/main/java/org/carbondata/scan/expression/conditional/EqualToExpression.java
deleted file mode 100644
index 30b1916..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/conditional/EqualToExpression.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.conditional;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class EqualToExpression extends BinaryConditionalExpression {
-
-  private static final long serialVersionUID = 1L;
-  private boolean isNull;
-
-  public EqualToExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  public EqualToExpression(Expression left, Expression right, boolean isNull) {
-    super(left, right);
-    this.isNull = isNull;
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult elRes = left.evaluate(value);
-    ExpressionResult erRes = right.evaluate(value);
-
-    boolean result = false;
-
-    ExpressionResult val1 = elRes;
-    ExpressionResult val2 = erRes;
-
-    if (elRes.isNull() || erRes.isNull()) {
-      if (isNull) {
-        elRes.set(DataType.BOOLEAN, elRes.isNull() == erRes.isNull());
-      } else {
-        elRes.set(DataType.BOOLEAN, false);
-      }
-      return elRes;
-    }
-    // if the two result types differ, compare using the higher-precedence side
-    if (elRes.getDataType() != erRes.getDataType()) {
-      if (elRes.getDataType().getPresedenceOrder() < erRes.getDataType().getPresedenceOrder()) {
-        val2 = elRes;
-        val1 = erRes;
-      }
-    }
-
-    switch (val1.getDataType()) {
-      case STRING:
-        result = val1.getString().equals(val2.getString());
-        break;
-      case SHORT:
-        result = val1.getShort().equals(val2.getShort());
-        break;
-      case INT:
-        result = val1.getInt().equals(val2.getInt());
-        break;
-      case DOUBLE:
-        result = val1.getDouble().equals(val2.getDouble());
-        break;
-      case TIMESTAMP:
-        result = val1.getTime().equals(val2.getTime());
-        break;
-      case LONG:
-        result = val1.getLong().equals(val2.getLong());
-        break;
-      case DECIMAL:
-        result = val1.getDecimal().compareTo(val2.getDecimal()) == 0;
-        break;
-      default:
-        throw new FilterUnsupportedException(
-            "DataType: " + val1.getDataType() + " not supported for the filter expression");
-    }
-    val1.set(DataType.BOOLEAN, result);
-    return val1;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.EQUALS;
-  }
-
-  @Override public String getString() {
-    return "EqualTo(" + left.getString() + ',' + right.getString() + ')';
-  }
-
-}
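
The three-argument constructor turns this node into a null-safe comparison (SQL's
<=> semantics): with isNull set, two null operands compare equal instead of yielding
false. A minimal sketch, assuming ExpressionResult exposes a getBoolean() accessor
for BOOLEAN results:

    import org.carbondata.core.carbon.metadata.datatype.DataType;
    import org.carbondata.scan.expression.LiteralExpression;
    import org.carbondata.scan.expression.conditional.EqualToExpression;

    public class NullSafeEqualsDemo {
      public static void main(String[] args) throws Exception {
        LiteralExpression nul = new LiteralExpression(null, DataType.STRING);
        // plain equality: a null operand always evaluates to false
        System.out.println(new EqualToExpression(nul, nul)
            .evaluate(null).getBoolean());        // expected: false (getBoolean assumed)
        // null-safe form: null <=> null evaluates to true
        System.out.println(new EqualToExpression(nul, nul, true)
            .evaluate(null).getBoolean());        // expected: true
      }
    }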

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/conditional/GreaterThanEqualToExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/conditional/GreaterThanEqualToExpression.java b/core/src/main/java/org/carbondata/scan/expression/conditional/GreaterThanEqualToExpression.java
deleted file mode 100644
index ef562f0..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/conditional/GreaterThanEqualToExpression.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.conditional;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class GreaterThanEqualToExpression extends BinaryConditionalExpression {
-  private static final long serialVersionUID = 4185317066280688984L;
-
-  public GreaterThanEqualToExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult elRes = left.evaluate(value);
-    ExpressionResult erRes = right.evaluate(value);
-    ExpressionResult exprResVal1 = elRes;
-    if (elRes.isNull() || erRes.isNull()) {
-      elRes.set(DataType.BOOLEAN, false);
-      return elRes;
-    }
-    if (elRes.getDataType() != erRes.getDataType()) {
-      if (elRes.getDataType().getPresedenceOrder() < erRes.getDataType().getPresedenceOrder()) {
-        exprResVal1 = erRes;
-      }
-
-    boolean result = false;
-    switch (exprResVal1.getDataType()) {
-      case STRING:
-        result = elRes.getString().compareTo(erRes.getString()) >= 0;
-        break;
-      case SHORT:
-        result = elRes.getShort() >= (erRes.getShort());
-        break;
-      case INT:
-        result = elRes.getInt() >= (erRes.getInt());
-        break;
-      case DOUBLE:
-        result = elRes.getDouble() >= (erRes.getDouble());
-        break;
-      case TIMESTAMP:
-        result = elRes.getTime() >= (erRes.getTime());
-        break;
-      case LONG:
-        result = elRes.getLong() >= (erRes.getLong());
-        break;
-      case DECIMAL:
-        result = elRes.getDecimal().compareTo(erRes.getDecimal()) >= 0;
-        break;
-      default:
-        throw new FilterUnsupportedException(
-            "DataType: " + exprResVal1.getDataType() + " not supported for the filter expression");
-    }
-    exprResVal1.set(DataType.BOOLEAN, result);
-    return exprResVal1;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.GREATERTHAN_EQUALTO;
-  }
-
-  @Override public String getString() {
-    return "GreaterThanEqualTo(" + left.getString() + ',' + right.getString() + ')';
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/conditional/GreaterThanExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/conditional/GreaterThanExpression.java b/core/src/main/java/org/carbondata/scan/expression/conditional/GreaterThanExpression.java
deleted file mode 100644
index ff2ff92..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/conditional/GreaterThanExpression.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.conditional;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-
-public class GreaterThanExpression extends BinaryConditionalExpression {
-  private static final long serialVersionUID = -5319109756575539219L;
-
-  public GreaterThanExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult exprLeftRes = left.evaluate(value);
-    ExpressionResult exprRightRes = right.evaluate(value);
-    ExpressionResult val1 = exprLeftRes;
-    if (exprLeftRes.isNull() || exprRightRes.isNull()) {
-      exprLeftRes.set(DataType.BOOLEAN, false);
-      return exprLeftRes;
-    }
-    if (exprLeftRes.getDataType() != exprRightRes.getDataType()) {
-      if (exprLeftRes.getDataType().getPresedenceOrder() < exprRightRes.getDataType()
-          .getPresedenceOrder()) {
-        val1 = exprRightRes;
-      }
-    }
-    boolean result = false;
-    switch (val1.getDataType()) {
-      case STRING:
-        result = exprLeftRes.getString().compareTo(exprRightRes.getString()) > 0;
-        break;
-      case DOUBLE:
-        result = exprLeftRes.getDouble() > (exprRightRes.getDouble());
-        break;
-      case SHORT:
-        result = exprLeftRes.getShort() > (exprRightRes.getShort());
-        break;
-      case INT:
-        result = exprLeftRes.getInt() > (exprRightRes.getInt());
-        break;
-      case TIMESTAMP:
-        result = exprLeftRes.getTime() > (exprRightRes.getTime());
-        break;
-      case LONG:
-        result = exprLeftRes.getLong() > (exprRightRes.getLong());
-        break;
-      case DECIMAL:
-        result = exprLeftRes.getDecimal().compareTo(exprRightRes.getDecimal()) > 0;
-        break;
-      default:
-        throw new FilterUnsupportedException(
-            "DataType: " + val1.getDataType() + " not supported for the filter expression");
-    }
-    val1.set(DataType.BOOLEAN, result);
-    return val1;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.GREATERTHAN;
-  }
-
-  @Override public String getString() {
-    return "GreaterThan(" + left.getString() + ',' + right.getString() + ')';
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/conditional/InExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/conditional/InExpression.java b/core/src/main/java/org/carbondata/scan/expression/conditional/InExpression.java
deleted file mode 100644
index d821825..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/conditional/InExpression.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.conditional;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class InExpression extends BinaryConditionalExpression {
-  private static final long serialVersionUID = -3149927446694175489L;
-
-  protected transient Set<ExpressionResult> setOfExprResult;
-
-  public InExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult leftResult = left.evaluate(value);
-
-    if (setOfExprResult == null) {
-      ExpressionResult rightResult = right.evaluate(value);
-      ExpressionResult val = null;
-      setOfExprResult = new HashSet<ExpressionResult>(10);
-      for (ExpressionResult expressionResVal : rightResult.getList()) {
-        if (expressionResVal.getDataType().getPresedenceOrder() < leftResult.getDataType()
-            .getPresedenceOrder()) {
-          val = leftResult;
-        } else {
-          val = expressionResVal;
-        }
-        switch (val.getDataType()) {
-          case STRING:
-            val = new ExpressionResult(val.getDataType(), expressionResVal.getString());
-            break;
-          case SHORT:
-            val = new ExpressionResult(val.getDataType(), expressionResVal.getShort());
-            break;
-          case INT:
-            val = new ExpressionResult(val.getDataType(), expressionResVal.getInt());
-            break;
-          case DOUBLE:
-            val = new ExpressionResult(val.getDataType(), expressionResVal.getDouble());
-            break;
-          case LONG:
-            val = new ExpressionResult(val.getDataType(), expressionResVal.getLong());
-            break;
-          case TIMESTAMP:
-            val = new ExpressionResult(val.getDataType(), expressionResVal.getTime());
-            break;
-          case DECIMAL:
-            val = new ExpressionResult(val.getDataType(), expressionResVal.getDecimal());
-            break;
-          default:
-            throw new FilterUnsupportedException(
-                "DataType: " + val.getDataType() + " not supported for the filter expression");
-        }
-        setOfExprResult.add(val);
-      }
-    }
-    leftResult.set(DataType.BOOLEAN, setOfExprResult.contains(leftResult));
-    return leftResult;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.IN;
-  }
-
-  @Override public String getString() {
-    return "IN(" + left.getString() + ',' + right.getString() + ')';
-  }
-
-}
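
The right child is expected to produce a list result (typically a ListExpression);
its members are converted once into a HashSet keyed by the higher-precedence type,
so membership hinges on the ExpressionResult equals()/hashCode() pair. A minimal
sketch, again assuming a getBoolean() accessor on ExpressionResult:

    import java.util.ArrayList;
    import java.util.List;

    import org.carbondata.core.carbon.metadata.datatype.DataType;
    import org.carbondata.scan.expression.Expression;
    import org.carbondata.scan.expression.LiteralExpression;
    import org.carbondata.scan.expression.conditional.InExpression;
    import org.carbondata.scan.expression.conditional.ListExpression;

    public class InListDemo {
      public static void main(String[] args) throws Exception {
        List<Expression> values = new ArrayList<Expression>();
        values.add(new LiteralExpression(1, DataType.INT));
        values.add(new LiteralExpression(2, DataType.INT));
        InExpression in = new InExpression(
            new LiteralExpression(2, DataType.INT), new ListExpression(values));
        // the set of right-hand values is built lazily on the first evaluate() call
        System.out.println(in.evaluate(null).getBoolean());  // expected: true
      }
    }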

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/conditional/LessThanEqualToExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/conditional/LessThanEqualToExpression.java b/core/src/main/java/org/carbondata/scan/expression/conditional/LessThanEqualToExpression.java
deleted file mode 100644
index 500531e..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/conditional/LessThanEqualToExpression.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.conditional;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class LessThanEqualToExpression extends BinaryConditionalExpression {
-  private static final long serialVersionUID = 1L;
-
-  public LessThanEqualToExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult elRes = left.evaluate(value);
-    ExpressionResult erRes = right.evaluate(value);
-    ExpressionResult exprResValue1 = elRes;
-    if (elRes.isNull() || erRes.isNull()) {
-      elRes.set(DataType.BOOLEAN, false);
-      return elRes;
-    }
-    if (elRes.getDataType() != erRes.getDataType()) {
-      if (elRes.getDataType().getPresedenceOrder() < erRes.getDataType().getPresedenceOrder()) {
-        exprResValue1 = erRes;
-      }
-    }
-    boolean result = false;
-    switch (exprResValue1.getDataType()) {
-      case STRING:
-        result = elRes.getString().compareTo(erRes.getString()) <= 0;
-        break;
-      case SHORT:
-        result = elRes.getShort() <= (erRes.getShort());
-        break;
-      case INT:
-        result = elRes.getInt() <= (erRes.getInt());
-        break;
-      case DOUBLE:
-        result = elRes.getDouble() <= (erRes.getDouble());
-        break;
-      case TIMESTAMP:
-        result = elRes.getTime() <= (erRes.getTime());
-        break;
-      case LONG:
-        result = elRes.getLong() <= (erRes.getLong());
-        break;
-      case DECIMAL:
-        result = elRes.getDecimal().compareTo(erRes.getDecimal()) <= 0;
-        break;
-      default:
-        throw new FilterUnsupportedException("DataType: " + exprResValue1.getDataType()
-            + " not supported for the filter expression");
-    }
-    exprResValue1.set(DataType.BOOLEAN, result);
-    return exprResValue1;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.LESSTHAN_EQUALTO;
-  }
-
-  @Override public String getString() {
-    return "LessThanEqualTo(" + left.getString() + ',' + right.getString() + ')';
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/conditional/LessThanExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/conditional/LessThanExpression.java b/core/src/main/java/org/carbondata/scan/expression/conditional/LessThanExpression.java
deleted file mode 100644
index 74d80ed..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/conditional/LessThanExpression.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.conditional;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class LessThanExpression extends BinaryConditionalExpression {
-
-  private static final long serialVersionUID = 6343040416663699924L;
-
-  public LessThanExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult erRes = right.evaluate(value);
-    ExpressionResult elRes = left.evaluate(value);
-
-    ExpressionResult val1 = elRes;
-
-    boolean result = false;
-
-    if (elRes.isNull() || erRes.isNull()) {
-      elRes.set(DataType.BOOLEAN, false);
-      return elRes;
-    }
-    if (elRes.getDataType() != erRes.getDataType()) {
-      if (elRes.getDataType().getPresedenceOrder() < erRes.getDataType().getPresedenceOrder()) {
-        val1 = erRes;
-      }
-    }
-    switch (val1.getDataType()) {
-      case STRING:
-        result = elRes.getString().compareTo(erRes.getString()) < 0;
-        break;
-      case SHORT:
-        result = elRes.getShort() < (erRes.getShort());
-        break;
-      case INT:
-        result = elRes.getInt() < (erRes.getInt());
-        break;
-      case DOUBLE:
-        result = elRes.getDouble() < (erRes.getDouble());
-        break;
-      case TIMESTAMP:
-        result = elRes.getTime() < (erRes.getTime());
-        break;
-      case LONG:
-        result = elRes.getLong() < (erRes.getLong());
-        break;
-      case DECIMAL:
-        result = elRes.getDecimal().compareTo(erRes.getDecimal()) < 0;
-        break;
-      default:
-        throw new FilterUnsupportedException(
-            "DataType: " + val1.getDataType() + " not supported for the filter expression");
-    }
-    val1.set(DataType.BOOLEAN, result);
-    return val1;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.LESSTHAN;
-  }
-
-  @Override public String getString() {
-    return "LessThan(" + left.getString() + ',' + right.getString() + ')';
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/conditional/ListExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/conditional/ListExpression.java b/core/src/main/java/org/carbondata/scan/expression/conditional/ListExpression.java
deleted file mode 100644
index b04b2b1..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/conditional/ListExpression.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.conditional;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class ListExpression extends Expression {
-  private static final long serialVersionUID = 1L;
-
-  public ListExpression(List<Expression> children) {
-    this.children = children;
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value) throws FilterUnsupportedException {
-    List<ExpressionResult> listOfExprRes = new ArrayList<ExpressionResult>(10);
-
-    for (Expression expr : children) {
-      try {
-        listOfExprRes.add(expr.evaluate(value));
-      } catch (FilterIllegalMemberException e) {
-        continue;
-      }
-    }
-    return new ExpressionResult(listOfExprRes);
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.LIST;
-  }
-
-  @Override public String getString() {
-    // build a readable form instead of returning null so parents can print this node
-    StringBuilder sb = new StringBuilder("ListExpression(");
-    for (Expression child : children) {
-      sb.append(child.getString()).append(';');
-    }
-    return sb.append(')').toString();
-  }
-
-}
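
Note that evaluate() above drops any child whose evaluation raises
FilterIllegalMemberException rather than failing the whole list, so an unconvertible
member of an IN-list is silently skipped while the remaining members still
participate in the filter.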

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/conditional/NotEqualsExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/conditional/NotEqualsExpression.java b/core/src/main/java/org/carbondata/scan/expression/conditional/NotEqualsExpression.java
deleted file mode 100644
index 7d70ae3..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/conditional/NotEqualsExpression.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.conditional;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class NotEqualsExpression extends BinaryConditionalExpression {
-
-  private static final long serialVersionUID = 8684006025540863973L;
-  private boolean isNotNull = false;
-  public NotEqualsExpression(Expression left, Expression right, boolean isNotNull) {
-    super(left, right);
-    this.isNotNull = isNotNull;
-  }
-
-  public NotEqualsExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult elRes = left.evaluate(value);
-    ExpressionResult erRes = right.evaluate(value);
-
-    boolean result = false;
-    ExpressionResult val1 = elRes;
-    ExpressionResult val2 = erRes;
-    if (elRes.isNull() || erRes.isNull()) {
-      if (isNotNull) {
-        elRes.set(DataType.BOOLEAN, elRes.isNull() != erRes.isNull());
-      } else {
-        elRes.set(DataType.BOOLEAN, false);
-      }
-      return elRes;
-    }
-    //default implementation if the data types are different for the resultsets
-    if (elRes.getDataType() != erRes.getDataType()) {
-      //            result = elRes.getString().equals(erRes.getString());
-      if (elRes.getDataType().getPresedenceOrder() < erRes.getDataType().getPresedenceOrder()) {
-        val1 = erRes;
-        val2 = elRes;
-      }
-    }
-    switch (val1.getDataType()) {
-      case STRING:
-        result = !val1.getString().equals(val2.getString());
-        break;
-      case SHORT:
-        result = val1.getShort().shortValue() != val2.getShort().shortValue();
-        break;
-      case INT:
-        result = val1.getInt().intValue() != val2.getInt().intValue();
-        break;
-      case DOUBLE:
-        result = val1.getDouble().doubleValue() != val2.getDouble().doubleValue();
-        break;
-      case TIMESTAMP:
-        result = val1.getTime().longValue() != val2.getTime().longValue();
-        break;
-      case LONG:
-        result = elRes.getLong().longValue() != (erRes.getLong()).longValue();
-        break;
-      case DECIMAL:
-        result = elRes.getDecimal().compareTo(erRes.getDecimal()) != 0;
-        break;
-      default:
-        throw new FilterUnsupportedException(
-            "DataType: " + val1.getDataType() + " not supported for the filter expression");
-    }
-    val1.set(DataType.BOOLEAN, result);
-    return val1;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.NOT_EQUALS;
-  }
-
-  @Override public String getString() {
-    return "NotEquals(" + left.getString() + ',' + right.getString() + ')';
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/conditional/NotInExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/conditional/NotInExpression.java b/core/src/main/java/org/carbondata/scan/expression/conditional/NotInExpression.java
deleted file mode 100644
index 0c0868b..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/conditional/NotInExpression.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.conditional;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class NotInExpression extends BinaryConditionalExpression {
-  private static final long serialVersionUID = -6835841923752118034L;
-  protected transient Set<ExpressionResult> setOfExprResult;
-
-  public NotInExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult leftRsult = left.evaluate(value);
-    if (setOfExprResult == null) {
-      ExpressionResult val = null;
-      ExpressionResult rightRsult = right.evaluate(value);
-      setOfExprResult = new HashSet<ExpressionResult>(10);
-      for (ExpressionResult exprResVal : rightRsult.getList()) {
-        if (exprResVal.getDataType().getPresedenceOrder() < leftRsult.getDataType()
-            .getPresedenceOrder()) {
-          val = leftRsult;
-        } else {
-          val = exprResVal;
-        }
-        switch (val.getDataType()) {
-          case STRING:
-            val = new ExpressionResult(val.getDataType(), exprResVal.getString());
-            break;
-          case SHORT:
-            val = new ExpressionResult(val.getDataType(), exprResVal.getShort());
-            break;
-          case INT:
-            val = new ExpressionResult(val.getDataType(), exprResVal.getInt());
-            break;
-          case DOUBLE:
-            val = new ExpressionResult(val.getDataType(), exprResVal.getDouble());
-            break;
-          case TIMESTAMP:
-            val = new ExpressionResult(val.getDataType(), exprResVal.getTime());
-            break;
-          case LONG:
-            val = new ExpressionResult(val.getDataType(), exprResVal.getLong());
-            break;
-          case DECIMAL:
-            val = new ExpressionResult(val.getDataType(), exprResVal.getDecimal());
-            break;
-          default:
-            throw new FilterUnsupportedException(
-                "DataType: " + val.getDataType() + " not supported for the filter expression");
-        }
-        setOfExprResult.add(val);
-      }
-    }
-    leftRsult.set(DataType.BOOLEAN, !setOfExprResult.contains(leftRsult));
-
-    return leftRsult;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.NOT_IN;
-  }
-
-  @Override public String getString() {
-    return "NOT IN(" + left.getString() + ',' + right.getString() + ')';
-  }
-
-}
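
For reference, a minimal sketch of how a NOT IN filter tree might be composed from the
classes in this package. ColumnExpression and LiteralExpression are assumed to exist in
org.carbondata.scan.expression with (name, type) and (value, type) constructors; the old
org.carbondata package names are used here to match the deleted sources in this hunk.

    import java.util.Arrays;
    import java.util.List;

    import org.carbondata.core.carbon.metadata.datatype.DataType;
    import org.carbondata.scan.expression.ColumnExpression;   // assumed class
    import org.carbondata.scan.expression.Expression;
    import org.carbondata.scan.expression.LiteralExpression;  // assumed class
    import org.carbondata.scan.expression.conditional.ListExpression;
    import org.carbondata.scan.expression.conditional.NotInExpression;

    public class NotInFilterSketch {
      public static Expression buildNameNotInFilter() {
        // left child: the column whose values are tested
        Expression nameColumn = new ColumnExpression("name", DataType.STRING);
        // right child: the literal values wrapped in a ListExpression
        List<Expression> literals = Arrays.<Expression>asList(
            new LiteralExpression("bob", DataType.STRING),
            new LiteralExpression("alice", DataType.STRING));
        // NotInExpression.evaluate builds a set from the list on the first call
        // and then tests the left result against that set
        return new NotInExpression(nameColumn, new ListExpression(literals));
      }
    }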

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/exception/FilterIllegalMemberException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/exception/FilterIllegalMemberException.java b/core/src/main/java/org/carbondata/scan/expression/exception/FilterIllegalMemberException.java
deleted file mode 100644
index 7130113..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/exception/FilterIllegalMemberException.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.exception;
-
-import java.util.Locale;
-
-/**
- * FilterIllegalMemberException represents an exception that can occur while evaluating
- * filter members. It needs to be handled gracefully, without propagating to the outer
- * layer, so that the execution does not get interrupted.
- */
-public class FilterIllegalMemberException extends Exception {
-
-  /**
-   * default serial version ID.
-   */
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * The Error message.
-   */
-  private String msg = "";
-
-  /**
-   * Constructor
-   *
-   * @param msg The error message for this exception.
-   */
-  public FilterIllegalMemberException(String msg) {
-    super(msg);
-    this.msg = msg;
-  }
-
-  /**
-   * Constructor
-   *
-   * @param msg The error message for this exception.
-   * @param t   The cause of this exception.
-   */
-  public FilterIllegalMemberException(String msg, Throwable t) {
-    super(msg, t);
-    this.msg = msg;
-  }
-
-  /**
-   * Constructor
-   *
-   * @param t The cause of this exception.
-   */
-  public FilterIllegalMemberException(Throwable t) {
-    super(t);
-  }
-
-  /**
-   * This method is used to get the localized message.
-   *
-   * @param locale - A Locale object represents a specific geographical,
-   *               political, or cultural region.
-   * @return - Localized error message.
-   */
-  public String getLocalizedMessage(Locale locale) {
-    return "";
-  }
-
-  /**
-   * getLocalizedMessage
-   */
-  @Override public String getLocalizedMessage() {
-    return super.getLocalizedMessage();
-  }
-
-  /**
-   * getMessage
-   */
-  public String getMessage() {
-    return this.msg;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/exception/FilterUnsupportedException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/exception/FilterUnsupportedException.java b/core/src/main/java/org/carbondata/scan/expression/exception/FilterUnsupportedException.java
deleted file mode 100644
index dbc406e..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/exception/FilterUnsupportedException.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.exception;
-
-import java.util.Locale;
-
-public class FilterUnsupportedException extends Exception {
-
-  /**
-   * default serial version ID.
-   */
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * The Error message.
-   */
-  private String msg = "";
-
-  /**
-   * Constructor
-   *
-   * @param msg The error message for this exception.
-   */
-  public FilterUnsupportedException(String msg) {
-    super(msg);
-    this.msg = msg;
-  }
-
-  /**
-   * Constructor
-   *
-   * @param msg The error message for this exception.
-   * @param t   The cause of this exception.
-   */
-  public FilterUnsupportedException(String msg, Throwable t) {
-    super(msg, t);
-    this.msg = msg;
-  }
-
-  /**
-   * Constructor
-   *
-   * @param t The cause of this exception.
-   */
-  public FilterUnsupportedException(Throwable t) {
-    super(t);
-  }
-
-  /**
-   * This method is used to get the localized message.
-   *
-   * @param locale - A Locale object represents a specific geographical,
-   *               political, or cultural region.
-   * @return - Localized error message.
-   */
-  public String getLocalizedMessage(Locale locale) {
-    return "";
-  }
-
-  /**
-   * getLocalizedMessage
-   */
-  @Override public String getLocalizedMessage() {
-    return super.getLocalizedMessage();
-  }
-
-  /**
-   * getMessage
-   */
-  public String getMessage() {
-    return this.msg;
-  }
-}



[37/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryMetadataReaderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryMetadataReaderImpl.java b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryMetadataReaderImpl.java
new file mode 100644
index 0000000..05c76ef
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryMetadataReaderImpl.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.reader;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.common.factory.CarbonCommonFactory;
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.carbon.ColumnIdentifier;
+import org.apache.carbondata.core.carbon.path.CarbonTablePath;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.service.PathService;
+import org.apache.carbondata.format.ColumnDictionaryChunkMeta;
+
+import org.apache.thrift.TBase;
+
+/**
+ * This class performs the functionality of reading the dictionary metadata file
+ */
+public class CarbonDictionaryMetadataReaderImpl implements CarbonDictionaryMetadataReader {
+
+  /**
+   * carbon table identifier
+   */
+  protected CarbonTableIdentifier carbonTableIdentifier;
+
+  /**
+   * HDFS store path
+   */
+  protected String hdfsStorePath;
+
+  /**
+   * column identifier
+   */
+  protected ColumnIdentifier columnIdentifier;
+
+  /**
+   * dictionary metadata file path
+   */
+  protected String columnDictionaryMetadataFilePath;
+
+  /**
+   * dictionary metadata thrift file reader
+   */
+  private ThriftReader dictionaryMetadataFileReader;
+
+  /**
+   * Constructor
+   *
+   * @param hdfsStorePath         HDFS store path
+   * @param carbonTableIdentifier table identifier which will give table name and database name
+   * @param columnIdentifier      column unique identifier
+   */
+  public CarbonDictionaryMetadataReaderImpl(String hdfsStorePath,
+      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier) {
+    this.hdfsStorePath = hdfsStorePath;
+    this.carbonTableIdentifier = carbonTableIdentifier;
+    this.columnIdentifier = columnIdentifier;
+    initFileLocation();
+  }
+
+  /**
+   * This method will be used to read the complete metadata file.
+   * Applicable scenarios:
+   * 1. Query execution. Whenever a query is executed, the dictionary metadata has to be
+   * read first, before the dictionary file itself can be read and the query scope defined.
+   * 2. If the dictionary file is read using a start and end offset, this meta list can be
+   * used to count the total number of dictionary chunks present between the two offsets.
+   *
+   * @return list of all dictionary meta chunks, each containing information for one segment
+   * @throws IOException if an I/O error occurs
+   */
+  @Override public List<CarbonDictionaryColumnMetaChunk> read() throws IOException {
+    List<CarbonDictionaryColumnMetaChunk> dictionaryMetaChunks =
+        new ArrayList<CarbonDictionaryColumnMetaChunk>(
+            CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    CarbonDictionaryColumnMetaChunk columnMetaChunk = null;
+    ColumnDictionaryChunkMeta dictionaryChunkMeta = null;
+    // open dictionary meta thrift reader
+    openThriftReader();
+    // read each dictionary chunk meta entry till the end of the file
+    while (dictionaryMetadataFileReader.hasNext()) {
+      // get the thrift object for dictionary chunk
+      dictionaryChunkMeta = (ColumnDictionaryChunkMeta) dictionaryMetadataFileReader.read();
+      // create a new instance of chunk meta wrapper using thrift object
+      columnMetaChunk = getNewInstanceOfCarbonDictionaryColumnMetaChunk(dictionaryChunkMeta);
+      dictionaryMetaChunks.add(columnMetaChunk);
+    }
+    return dictionaryMetaChunks;
+  }
+
+  /**
+   * This method will be used to read only the last entry of dictionary meta chunk.
+   * Applicable scenarios :
+   * 1. Global dictionary generation for incremental load. In this case only the
+   * last dictionary chunk meta entry has to be read to calculate min, max surrogate
+   * key and start and end offset for the new dictionary chunk.
+   * 2. Truncate operation. While writing the dictionary file during incremental load,
+   * the dictionary file needs to be validated for any inconsistency. Here the end offset
+   * of the last dictionary chunk meta is validated against the file size.
+   *
+   * @return last segment entry for dictionary chunk
+   * @throws IOException if an I/O error occurs
+   */
+  @Override public CarbonDictionaryColumnMetaChunk readLastEntryOfDictionaryMetaChunk()
+      throws IOException {
+    ColumnDictionaryChunkMeta dictionaryChunkMeta = null;
+    // open dictionary meta thrift reader
+    openThriftReader();
+    // at the completion of while loop we will get the last dictionary chunk entry
+    while (dictionaryMetadataFileReader.hasNext()) {
+      // get the thrift object for dictionary chunk
+      dictionaryChunkMeta = (ColumnDictionaryChunkMeta) dictionaryMetadataFileReader.read();
+    }
+    // create a new instance of chunk meta wrapper using thrift object
+    CarbonDictionaryColumnMetaChunk columnMetaChunkForLastSegment =
+        getNewInstanceOfCarbonDictionaryColumnMetaChunk(dictionaryChunkMeta);
+    return columnMetaChunkForLastSegment;
+  }
+
+  /**
+   * Closes this stream and releases any system resources associated
+   * with it. If the stream is already closed then invoking this
+   * method has no effect.
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  @Override public void close() throws IOException {
+    if (null != dictionaryMetadataFileReader) {
+      dictionaryMetadataFileReader.close();
+      dictionaryMetadataFileReader = null;
+    }
+  }
+
+  /**
+   * This method will form the path for dictionary metadata file for a given column
+   */
+  protected void initFileLocation() {
+    PathService pathService = CarbonCommonFactory.getPathService();
+    CarbonTablePath carbonTablePath =
+        pathService.getCarbonTablePath(columnIdentifier, this.hdfsStorePath, carbonTableIdentifier);
+    this.columnDictionaryMetadataFilePath =
+        carbonTablePath.getDictionaryMetaFilePath(columnIdentifier.getColumnId());
+  }
+
+  /**
+   * This method will open the dictionary file stream for reading
+   *
+   * @throws IOException thrift reader open method throws IOException
+   */
+  private void openThriftReader() throws IOException {
+    // initialise dictionary file reader which will return dictionary thrift object
+    // dictionary thrift object contains a list of byte buffer
+    if (null == dictionaryMetadataFileReader) {
+      dictionaryMetadataFileReader =
+          new ThriftReader(this.columnDictionaryMetadataFilePath, new ThriftReader.TBaseCreator() {
+            @Override public TBase create() {
+              return new ColumnDictionaryChunkMeta();
+            }
+          });
+      // Open it
+      dictionaryMetadataFileReader.open();
+    }
+
+  }
+
+  /**
+   * Given a thrift object, this method will create a new wrapper class object
+   * for dictionary chunk
+   *
+   * @param dictionaryChunkMeta reference for chunk meta thrift object
+   * @return wrapper object of dictionary chunk meta
+   */
+  private CarbonDictionaryColumnMetaChunk getNewInstanceOfCarbonDictionaryColumnMetaChunk(
+      ColumnDictionaryChunkMeta dictionaryChunkMeta) {
+    CarbonDictionaryColumnMetaChunk columnMetaChunk =
+        new CarbonDictionaryColumnMetaChunk(dictionaryChunkMeta.getMin_surrogate_key(),
+            dictionaryChunkMeta.getMax_surrogate_key(), dictionaryChunkMeta.getStart_offset(),
+            dictionaryChunkMeta.getEnd_offset(), dictionaryChunkMeta.getChunk_count());
+    return columnMetaChunk;
+  }
+}
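
A minimal usage sketch for the metadata reader above, reading only the last chunk entry
as is done for incremental load. How the CarbonTableIdentifier and ColumnIdentifier
instances are constructed is outside this hunk, so they are taken as parameters here.

    import java.io.IOException;

    import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
    import org.apache.carbondata.core.carbon.ColumnIdentifier;
    import org.apache.carbondata.core.reader.CarbonDictionaryColumnMetaChunk;
    import org.apache.carbondata.core.reader.CarbonDictionaryMetadataReader;
    import org.apache.carbondata.core.reader.CarbonDictionaryMetadataReaderImpl;

    public class DictionaryMetaSketch {
      public static long lastEndOffset(String storePath, CarbonTableIdentifier table,
          ColumnIdentifier column) throws IOException {
        CarbonDictionaryMetadataReader reader =
            new CarbonDictionaryMetadataReaderImpl(storePath, table, column);
        try {
          // only the last entry is needed to find where the next chunk starts
          CarbonDictionaryColumnMetaChunk last = reader.readLastEntryOfDictionaryMetaChunk();
          return last.getEnd_offset();
        } finally {
          reader.close();
        }
      }
    }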

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryReader.java b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryReader.java
new file mode 100644
index 0000000..dded6c2
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryReader.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.reader;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * dictionary reader interface which declares methods for
+ * reading carbon dictionary files
+ */
+public interface CarbonDictionaryReader extends Closeable {
+  /**
+   * This method should be used when complete dictionary data needs to be read.
+   * Applicable scenarios :
+   * 1. Global dictionary generation in case of incremental load
+   * 2. Reading dictionary file on first time query
+   * 3. Loading a dictionary column in memory based on query requirement.
+   * This is the case where the carbon column cache feature is enabled, in which a
+   * column dictionary is read if the column is present in the query.
+   *
+   * @return list of byte array. Each byte array is a unique dictionary value
+   * @throws IOException if an I/O error occurs
+   */
+  List<byte[]> read() throws IOException;
+
+  /**
+   * This method should be used when data has to be read from a given offset.
+   * Applicable scenarios :
+   * 1. Incremental data load. If column dictionary is already loaded in memory
+   * and an incremental load is done, then for a new query only the new dictionary
+   * data has to be read from the dictionary file.
+   *
+   * @param startOffset start offset of dictionary file
+   * @return list of byte array. Each byte array is a unique dictionary value
+   * @throws IOException if an I/O error occurs
+   */
+  List<byte[]> read(long startOffset) throws IOException;
+
+  /**
+   * This method will be used to read data between given start and end offset.
+   * Applicable scenarios:
+   * 1. Truncate operation. If there is any inconsistency while writing the dictionary file
+   * then we can give the start and end offset till where the data has to be retained.
+   *
+   * @param startOffset start offset of dictionary file
+   * @param endOffset   end offset of dictionary file
+   * @return list of byte array. Each byte array is a unique dictionary value
+   * @throws IOException if an I/O error occurs
+   */
+  List<byte[]> read(long startOffset, long endOffset) throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryReaderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryReaderImpl.java b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryReaderImpl.java
new file mode 100644
index 0000000..a843701
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryReaderImpl.java
@@ -0,0 +1,314 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.reader;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.common.factory.CarbonCommonFactory;
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.carbon.ColumnIdentifier;
+import org.apache.carbondata.core.carbon.path.CarbonTablePath;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.service.PathService;
+import org.apache.carbondata.format.ColumnDictionaryChunk;
+
+import org.apache.thrift.TBase;
+
+/**
+ * This class performs the functionality of reading a carbon dictionary file.
+ * It implements various overloaded methods for the read functionality.
+ */
+public class CarbonDictionaryReaderImpl implements CarbonDictionaryReader {
+
+  /**
+   * carbon table identifier
+   */
+  protected CarbonTableIdentifier carbonTableIdentifier;
+
+  /**
+   * HDFS store path
+   */
+  protected String hdfsStorePath;
+
+  /**
+   * column name
+   */
+  protected ColumnIdentifier columnIdentifier;
+
+  /**
+   * dictionary file path
+   */
+  protected String columnDictionaryFilePath;
+
+  /**
+   * dictionary thrift file reader
+   */
+  private ThriftReader dictionaryFileReader;
+
+  /**
+   * Constructor
+   *
+   * @param hdfsStorePath         HDFS store path
+   * @param carbonTableIdentifier table identifier which will give table name and database name
+   * @param columnIdentifier      column unique identifier
+   */
+  public CarbonDictionaryReaderImpl(String hdfsStorePath,
+      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier) {
+    this.hdfsStorePath = hdfsStorePath;
+    this.carbonTableIdentifier = carbonTableIdentifier;
+    this.columnIdentifier = columnIdentifier;
+    initFileLocation();
+  }
+
+  /**
+   * This method should be used when complete dictionary data needs to be read.
+   * Applicable scenarios :
+   * 1. Global dictionary generation in case of incremental load
+   * 2. Reading dictionary file on first time query
+   * 3. Loading a dictionary column in memory based on query requirement.
+   * This is the case where the carbon column cache feature is enabled, in which a
+   * column dictionary is read if the column is present in the query.
+   *
+   * @return list of byte array. Each byte array is a unique dictionary value
+   * @throws IOException if an I/O error occurs
+   */
+  @Override public List<byte[]> read() throws IOException {
+    return read(0L);
+  }
+
+  /**
+   * This method should be used when data has to be read from a given offset.
+   * Applicable scenarios :
+   * 1. Incremental data load. If column dictionary is already loaded in memory
+   * and an incremental load is done, then for a new query only the new dictionary
+   * data has to be read from the dictionary file.
+   *
+   * @param startOffset start offset of dictionary file
+   * @return list of byte array. Each byte array is a unique dictionary value
+   * @throws IOException if an I/O error occurs
+   */
+  @Override public List<byte[]> read(long startOffset) throws IOException {
+    List<CarbonDictionaryColumnMetaChunk> carbonDictionaryColumnMetaChunks =
+        readDictionaryMetadataFile();
+    // get the last entry for carbon dictionary meta chunk
+    CarbonDictionaryColumnMetaChunk carbonDictionaryColumnMetaChunk =
+        carbonDictionaryColumnMetaChunks.get(carbonDictionaryColumnMetaChunks.size() - 1);
+    // end offset till where the dictionary file has to be read
+    long endOffset = carbonDictionaryColumnMetaChunk.getEnd_offset();
+    return read(carbonDictionaryColumnMetaChunks, startOffset, endOffset);
+  }
+
+  /**
+   * This method will be used to read data between given start and end offset.
+   * Applicable scenarios:
+   * 1. Truncate operation. If there is any inconsistency while writing the dictionary file
+   * then we can give the start and end offset till where the data has to be retained.
+   *
+   * @param startOffset start offset of dictionary file
+   * @param endOffset   end offset of dictionary file
+   * @return list of byte array. Each byte array is a unique dictionary value
+   * @throws IOException if an I/O error occurs
+   */
+  @Override public List<byte[]> read(long startOffset, long endOffset) throws IOException {
+    List<CarbonDictionaryColumnMetaChunk> carbonDictionaryColumnMetaChunks =
+        readDictionaryMetadataFile();
+    return read(carbonDictionaryColumnMetaChunks, startOffset, endOffset);
+  }
+
+  /**
+   * Closes this stream and releases any system resources associated
+   * with it. If the stream is already closed then invoking this
+   * method has no effect.
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  @Override public void close() throws IOException {
+    if (null != dictionaryFileReader) {
+      dictionaryFileReader.close();
+      dictionaryFileReader = null;
+    }
+  }
+
+  /**
+   * @param carbonDictionaryColumnMetaChunks dictionary meta chunk list
+   * @param startOffset                      start offset for dictionary data file
+   * @param endOffset                        end offset till where data has
+   *                                         to be read from dictionary data file
+   * @return list of byte array dictionary values
+   * @throws IOException if the readDictionaryFile method throws an I/O exception
+   */
+  private List<byte[]> read(List<CarbonDictionaryColumnMetaChunk> carbonDictionaryColumnMetaChunks,
+      long startOffset, long endOffset) throws IOException {
+    // calculate the number of chunks to be read from dictionary file from start offset
+    int dictionaryChunkCountsToBeRead =
+        calculateTotalDictionaryChunkCountsToBeRead(carbonDictionaryColumnMetaChunks, startOffset,
+            endOffset);
+    // open dictionary file thrift reader
+    openThriftReader();
+    // read the required number of chunks from dictionary file
+    List<ColumnDictionaryChunk> columnDictionaryChunks =
+        readDictionaryFile(startOffset, dictionaryChunkCountsToBeRead);
+    // convert byte buffer list to byte array list of dictionary values
+    List<byte[]> dictionaryValues =
+        new ArrayList<byte[]>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    for (ColumnDictionaryChunk dictionaryChunk : columnDictionaryChunks) {
+      convertAndFillByteBufferListToByteArrayList(dictionaryValues, dictionaryChunk.getValues());
+    }
+    return dictionaryValues;
+  }
+
+  /**
+   * This method will convert and fill list of byte buffer to list of byte array
+   *
+   * @param dictionaryValues          list of byte array. Each byte array is
+   *                                  a unique dictionary value
+   * @param dictionaryValueBufferList dictionary thrift object which is a list of byte buffer.
+   *                                  Each dictionary value is wrapped in a byte buffer before
+   *                                  being written to the file
+   */
+  private void convertAndFillByteBufferListToByteArrayList(List<byte[]> dictionaryValues,
+      List<ByteBuffer> dictionaryValueBufferList) {
+    for (ByteBuffer buffer : dictionaryValueBufferList) {
+      int length = buffer.limit();
+      byte[] value = new byte[length];
+      buffer.get(value, 0, value.length);
+      dictionaryValues.add(value);
+    }
+  }
+
+  /**
+   * This method will form the path for dictionary file for a given column
+   */
+  protected void initFileLocation() {
+    PathService pathService = CarbonCommonFactory.getPathService();
+    CarbonTablePath carbonTablePath = pathService.getCarbonTablePath(columnIdentifier,
+                this.hdfsStorePath, carbonTableIdentifier);
+    this.columnDictionaryFilePath = carbonTablePath
+        .getDictionaryFilePath(columnIdentifier.getColumnId());
+  }
+
+  /**
+   * This method will read the dictionary file and return the list of dictionary thrift object
+   *
+   * @param dictionaryStartOffset        start offset for dictionary file
+   * @param dictionaryChunkCountToBeRead number of dictionary chunks to be read
+   * @return list of dictionary chunks
+   * @throws IOException setReadOffset method throws I/O exception
+   */
+  private List<ColumnDictionaryChunk> readDictionaryFile(long dictionaryStartOffset,
+      int dictionaryChunkCountToBeRead) throws IOException {
+    List<ColumnDictionaryChunk> dictionaryChunks =
+        new ArrayList<ColumnDictionaryChunk>(dictionaryChunkCountToBeRead);
+    // skip the number of bytes if a start offset is given
+    dictionaryFileReader.setReadOffset(dictionaryStartOffset);
+    // read till the required dictionary chunk count is reached
+    while (dictionaryFileReader.hasNext()
+        && dictionaryChunks.size() != dictionaryChunkCountToBeRead) {
+      dictionaryChunks.add((ColumnDictionaryChunk) dictionaryFileReader.read());
+    }
+    return dictionaryChunks;
+  }
+
+  /**
+   * This method will read the dictionary metadata file for a given column
+   * and calculate the number of chunks to be read from the dictionary file.
+   * It will do a strict validation of the start and end offsets: because the data is
+   * written in thrift format, the thrift objects cannot be retrieved properly if the
+   * offsets do not match exactly.
+   *
+   * @param dictionaryChunkMetaList    list of dictionary chunk metadata
+   * @param dictionaryChunkStartOffset start offset for a dictionary chunk
+   * @param dictionaryChunkEndOffset   end offset for a dictionary chunk
+   * @return the total number of dictionary chunks to be read
+   */
+  private int calculateTotalDictionaryChunkCountsToBeRead(
+      List<CarbonDictionaryColumnMetaChunk> dictionaryChunkMetaList,
+      long dictionaryChunkStartOffset, long dictionaryChunkEndOffset) {
+    boolean chunkWithStartOffsetFound = false;
+    int dictionaryChunkCount = 0;
+    for (CarbonDictionaryColumnMetaChunk metaChunk : dictionaryChunkMetaList) {
+      // find the column meta chunk whose start offset value matches
+      // with the given dictionary start offset
+      if (!chunkWithStartOffsetFound && dictionaryChunkStartOffset == metaChunk.getStart_offset()) {
+        chunkWithStartOffsetFound = true;
+      }
+      // start offset is found then keep adding the chunk count to be read
+      if (chunkWithStartOffsetFound) {
+        dictionaryChunkCount = dictionaryChunkCount + metaChunk.getChunk_count();
+      }
+      // when end offset is reached then break the loop
+      if (dictionaryChunkEndOffset == metaChunk.getEnd_offset()) {
+        break;
+      }
+    }
+    return dictionaryChunkCount;
+  }
+
+  /**
+   * This method will read dictionary metadata file and return the dictionary meta chunks
+   *
+   * @return list of dictionary metadata chunks
+   * @throws IOException read and close method throws IO exception
+   */
+  private List<CarbonDictionaryColumnMetaChunk> readDictionaryMetadataFile() throws IOException {
+    CarbonDictionaryMetadataReader columnMetadataReaderImpl = getDictionaryMetadataReader();
+    List<CarbonDictionaryColumnMetaChunk> dictionaryMetaChunkList = null;
+    // read metadata file
+    try {
+      dictionaryMetaChunkList = columnMetadataReaderImpl.read();
+    } finally {
+      // close the metadata reader
+      columnMetadataReaderImpl.close();
+    }
+    return dictionaryMetaChunkList;
+  }
+
+  /**
+   * @return a dictionary metadata reader for this column
+   */
+  protected CarbonDictionaryMetadataReader getDictionaryMetadataReader() {
+    return new CarbonDictionaryMetadataReaderImpl(this.hdfsStorePath, carbonTableIdentifier,
+        this.columnIdentifier);
+  }
+
+  /**
+   * This method will open the dictionary file stream for reading
+   *
+   * @throws IOException thrift reader open method throws IOException
+   */
+  private void openThriftReader() throws IOException {
+    if (null == dictionaryFileReader) {
+      // initialise dictionary file reader which will return dictionary thrift object
+      // dictionary thrift object contains a list of byte buffer
+      dictionaryFileReader =
+          new ThriftReader(this.columnDictionaryFilePath, new ThriftReader.TBaseCreator() {
+            @Override public TBase create() {
+              return new ColumnDictionaryChunk();
+            }
+          });
+      // Open dictionary file reader
+      dictionaryFileReader.open();
+    }
+
+  }
+}
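
A minimal sketch of a full dictionary read through the implementation above. Decoding the
returned byte arrays as UTF-8 is an assumption for illustration only; the reader itself
returns raw bytes.

    import java.io.IOException;
    import java.nio.charset.Charset;
    import java.util.List;

    import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
    import org.apache.carbondata.core.carbon.ColumnIdentifier;
    import org.apache.carbondata.core.reader.CarbonDictionaryReader;
    import org.apache.carbondata.core.reader.CarbonDictionaryReaderImpl;

    public class DictionaryReadSketch {
      public static void printAllValues(String storePath, CarbonTableIdentifier table,
          ColumnIdentifier column) throws IOException {
        CarbonDictionaryReader reader = new CarbonDictionaryReaderImpl(storePath, table, column);
        try {
          // a full read is read(0L): every chunk from the start of the dictionary file
          List<byte[]> values = reader.read();
          for (byte[] value : values) {
            System.out.println(new String(value, Charset.forName("UTF-8")));
          }
        } finally {
          reader.close();
        }
      }
    }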

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/reader/CarbonFooterReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/reader/CarbonFooterReader.java b/core/src/main/java/org/apache/carbondata/core/reader/CarbonFooterReader.java
new file mode 100644
index 0000000..b9c3ae1
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/reader/CarbonFooterReader.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.reader;
+
+import java.io.IOException;
+
+import org.apache.carbondata.format.FileFooter;
+
+import org.apache.thrift.TBase;
+
+/**
+ * Reads the metadata from a fact file as an org.apache.carbondata.format.FileFooter thrift object
+ */
+public class CarbonFooterReader {
+
+  //Fact file path
+  private String filePath;
+
+  //From which offset of file this metadata should be read
+  private long offset;
+
+  public CarbonFooterReader(String filePath, long offset) {
+
+    this.filePath = filePath;
+    this.offset = offset;
+  }
+
+  /**
+   * It reads the metadata in FileFooter thrift object format.
+   *
+   * @return the FileFooter thrift object read from the configured offset
+   * @throws IOException
+   */
+  public FileFooter readFooter() throws IOException {
+    ThriftReader thriftReader = openThriftReader(filePath);
+    thriftReader.open();
+    //Set the offset from where it should read
+    thriftReader.setReadOffset(offset);
+    FileFooter footer = (FileFooter) thriftReader.read();
+    thriftReader.close();
+    return footer;
+  }
+
+  /**
+   * Open the thrift reader
+   *
+   * @param filePath
+   * @return
+   * @throws IOException
+   */
+  private ThriftReader openThriftReader(String filePath) throws IOException {
+
+    ThriftReader thriftReader = new ThriftReader(filePath, new ThriftReader.TBaseCreator() {
+      @Override public TBase create() {
+        return new FileFooter();
+      }
+    });
+    return thriftReader;
+  }
+
+}
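
A short sketch of reading a footer with the class above. Where the footer offset comes
from (typically the block index entry for the fact file) is an assumption here; the
reader only needs a path and an offset.

    import java.io.IOException;

    import org.apache.carbondata.core.reader.CarbonFooterReader;
    import org.apache.carbondata.format.FileFooter;

    public class FooterSketch {
      public static FileFooter readFooterAt(String factFilePath, long footerOffset)
          throws IOException {
        // footerOffset is assumed to come from the index entry for this fact file
        CarbonFooterReader reader = new CarbonFooterReader(factFilePath, footerOffset);
        return reader.readFooter();
      }
    }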

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/reader/CarbonIndexFileReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/reader/CarbonIndexFileReader.java b/core/src/main/java/org/apache/carbondata/core/reader/CarbonIndexFileReader.java
new file mode 100644
index 0000000..7f9a984
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/reader/CarbonIndexFileReader.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.reader;
+
+import java.io.IOException;
+
+import org.apache.carbondata.format.BlockIndex;
+import org.apache.carbondata.format.IndexHeader;
+
+import org.apache.thrift.TBase;
+
+/**
+ * Reader class which will be used to read the index file
+ */
+public class CarbonIndexFileReader {
+
+  /**
+   * reader
+   */
+  private ThriftReader thriftReader;
+
+  /**
+   * Below method will be used to read the index header
+   *
+   * @return index header
+   * @throws IOException if any problem occurs while reading the header
+   */
+  public IndexHeader readIndexHeader() throws IOException {
+    IndexHeader indexHeader = (IndexHeader) thriftReader.read(new ThriftReader.TBaseCreator() {
+      @Override public TBase create() {
+        return new IndexHeader();
+      }
+    });
+    return indexHeader;
+  }
+
+  /**
+   * Below method will be used to close the reader
+   */
+  public void closeThriftReader() {
+    thriftReader.close();
+  }
+
+  /**
+   * Below method will be used to read the block index from the file
+   *
+   * @return block index info
+   * @throws IOException if problem while reading the block index
+   */
+  public BlockIndex readBlockIndexInfo() throws IOException {
+    BlockIndex blockInfo = (BlockIndex) thriftReader.read(new ThriftReader.TBaseCreator() {
+      @Override public TBase create() {
+        return new BlockIndex();
+      }
+    });
+    return blockInfo;
+  }
+
+  /**
+   * Open the thrift reader
+   *
+   * @param filePath
+   * @throws IOException
+   */
+  public void openThriftReader(String filePath) throws IOException {
+    thriftReader = new ThriftReader(filePath);
+    thriftReader.open();
+  }
+
+  /**
+   * check if any more object is present
+   *
+   * @return true if any more object can be read
+   * @throws IOException
+   */
+  public boolean hasNext() throws IOException {
+    return thriftReader.hasNext();
+  }
+}
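
A sketch of draining an index file with the reader above, assuming the layout the API
implies: one IndexHeader followed by one BlockIndex entry per block.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.carbondata.core.reader.CarbonIndexFileReader;
    import org.apache.carbondata.format.BlockIndex;
    import org.apache.carbondata.format.IndexHeader;

    public class IndexFileSketch {
      public static List<BlockIndex> readAllBlockIndexes(String indexFilePath)
          throws IOException {
        CarbonIndexFileReader reader = new CarbonIndexFileReader();
        reader.openThriftReader(indexFilePath);
        try {
          IndexHeader header = reader.readIndexHeader();   // written once at the start
          List<BlockIndex> blocks = new ArrayList<BlockIndex>();
          while (reader.hasNext()) {
            blocks.add(reader.readBlockIndexInfo());       // one entry per block
          }
          return blocks;
        } finally {
          reader.closeThriftReader();
        }
      }
    }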

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/reader/ThriftReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/reader/ThriftReader.java b/core/src/main/java/org/apache/carbondata/core/reader/ThriftReader.java
new file mode 100644
index 0000000..0958349
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/reader/ThriftReader.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.reader;
+
+import java.io.DataInputStream;
+import java.io.IOException;
+
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+import org.apache.thrift.TBase;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.transport.TIOStreamTransport;
+
+/**
+ * A simple class for reading Thrift objects (of a single type) from a file.
+ */
+public class ThriftReader {
+  /**
+   * buffer size
+   */
+  private static final int bufferSize = 2048;
+  /**
+   * File containing the objects.
+   */
+  private String fileName;
+  /**
+   * Used to create empty objects that will be initialized with values from the file.
+   */
+  private TBaseCreator creator;
+  /**
+   * For reading the file.
+   */
+  private DataInputStream dataInputStream;
+  /**
+   * For reading the binary thrift objects.
+   */
+  private TProtocol binaryIn;
+
+  /**
+   * Constructor.
+   */
+  public ThriftReader(String fileName, TBaseCreator creator) {
+    this.fileName = fileName;
+    this.creator = creator;
+  }
+
+  /**
+   * Constructor.
+   */
+  public ThriftReader(String fileName) {
+    this.fileName = fileName;
+  }
+
+  /**
+   * Opens the file for reading.
+   */
+  public void open() throws IOException {
+    FileFactory.FileType fileType = FileFactory.getFileType(fileName);
+    dataInputStream = FileFactory.getDataInputStream(fileName, fileType, bufferSize);
+    binaryIn = new TCompactProtocol(new TIOStreamTransport(dataInputStream));
+  }
+
+  /**
+   * This method will set the position of stream from where data has to be read
+   */
+  public void setReadOffset(long bytesToSkip) throws IOException {
+    if (dataInputStream.skip(bytesToSkip) != bytesToSkip) {
+      throw new IOException("It doesn't set the offset properly");
+    }
+  }
+
+  /**
+   * Checks if another object is available by attempting to read another byte from the stream.
+   */
+  public boolean hasNext() throws IOException {
+    dataInputStream.mark(1);
+    int val = dataInputStream.read();
+    dataInputStream.reset();
+    return val != -1;
+  }
+
+  /**
+   * Reads the next object from the file.
+   */
+  public TBase read() throws IOException {
+    TBase t = creator.create();
+    try {
+      t.read(binaryIn);
+    } catch (TException e) {
+      throw new IOException(e);
+    }
+    return t;
+  }
+
+  /**
+   * Reads the next object from the file.
+   *
+   * @param creator type of object which will be returned
+   * @throws IOException any problem while reading
+   */
+  public TBase read(TBaseCreator creator) throws IOException {
+    TBase t = creator.create();
+    try {
+      t.read(binaryIn);
+    } catch (TException e) {
+      throw new IOException(e);
+    }
+    return t;
+  }
+
+  /**
+   * Closes the file.
+   */
+  public void close() {
+    CarbonUtil.closeStreams(dataInputStream);
+  }
+
+  /**
+   * Thrift deserializes by taking an existing object and populating it. ThriftReader
+   * needs a way of obtaining instances of the class to be populated and this interface
+   * defines the mechanism by which a client provides these instances.
+   */
+  public static interface TBaseCreator {
+    TBase create();
+  }
+}
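
A usage sketch for ThriftReader with a TBaseCreator, using ColumnDictionaryChunk as the
example thrift type and a caller-supplied start offset:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.carbondata.core.reader.ThriftReader;
    import org.apache.carbondata.format.ColumnDictionaryChunk;

    import org.apache.thrift.TBase;

    public class ThriftReaderSketch {
      public static List<TBase> readChunksFrom(String fileName, long startOffset)
          throws IOException {
        ThriftReader reader = new ThriftReader(fileName, new ThriftReader.TBaseCreator() {
          @Override public TBase create() {
            return new ColumnDictionaryChunk();
          }
        });
        reader.open();
        try {
          reader.setReadOffset(startOffset);  // skip straight to the chunk of interest
          List<TBase> objects = new ArrayList<TBase>();
          while (reader.hasNext()) {
            objects.add(reader.read());
          }
          return objects;
        } finally {
          reader.close();
        }
      }
    }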

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReader.java b/core/src/main/java/org/apache/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReader.java
new file mode 100644
index 0000000..e0bb413
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReader.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.reader.sortindex;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Interface for reading the dictionary sort index and inverted sort index
+ */
+public interface CarbonDictionarySortIndexReader extends Closeable {
+
+  /**
+   * Method for reading the carbon dictionary sort index data
+   * from the column's sortIndex file.
+   *
+   * @return the list of dictionary sort index values
+   * @throws IOException In case any I/O error occurs
+   */
+  public List<Integer> readSortIndex() throws IOException;
+
+  /**
+   * Method for reading the carbon dictionary inverted sort index data
+   * from the column's sortIndex file.
+   *
+   * @return the list of dictionary inverted sort index values
+   * @throws IOException In case any I/O error occurs
+   */
+  public List<Integer> readInvertedSortIndex() throws IOException;
+}
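
A minimal sketch of reading both indexes through this interface, using the implementation
that follows in this commit; the constructor arguments match its (table identifier,
column identifier, store path) signature.

    import java.io.IOException;
    import java.util.List;

    import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
    import org.apache.carbondata.core.carbon.ColumnIdentifier;
    import org.apache.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReader;
    import org.apache.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReaderImpl;

    public class SortIndexSketch {
      public static void dump(CarbonTableIdentifier table, ColumnIdentifier column,
          String storePath) throws IOException {
        CarbonDictionarySortIndexReader reader =
            new CarbonDictionarySortIndexReaderImpl(table, column, storePath);
        try {
          List<Integer> sortIndex = reader.readSortIndex();
          List<Integer> invertedIndex = reader.readInvertedSortIndex();
          System.out.println(sortIndex.size() + " sort index entries, "
              + invertedIndex.size() + " inverted entries");
        } finally {
          reader.close();
        }
      }
    }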

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReaderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReaderImpl.java b/core/src/main/java/org/apache/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReaderImpl.java
new file mode 100644
index 0000000..70628b3
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/reader/sortindex/CarbonDictionarySortIndexReaderImpl.java
@@ -0,0 +1,228 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.reader.sortindex;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.carbondata.common.factory.CarbonCommonFactory;
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.carbon.ColumnIdentifier;
+import org.apache.carbondata.core.carbon.path.CarbonTablePath;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+import org.apache.carbondata.core.reader.CarbonDictionaryColumnMetaChunk;
+import org.apache.carbondata.core.reader.CarbonDictionaryMetadataReader;
+import org.apache.carbondata.core.reader.CarbonDictionaryMetadataReaderImpl;
+import org.apache.carbondata.core.reader.ThriftReader;
+import org.apache.carbondata.core.service.PathService;
+import org.apache.carbondata.format.ColumnSortInfo;
+
+import org.apache.thrift.TBase;
+
+/**
+ * Implementation for reading the dictionary sort index and inverted sort index.
+ */
+public class CarbonDictionarySortIndexReaderImpl implements CarbonDictionarySortIndexReader {
+
+  /**
+   * carbonTable Identifier holding the info of databaseName and tableName
+   */
+  protected CarbonTableIdentifier carbonTableIdentifier;
+
+  /**
+   * column identifier
+   */
+  protected ColumnIdentifier columnIdentifier;
+
+  /**
+   * hdfs store location
+   */
+  protected String carbonStorePath;
+
+  /**
+   * the path of the dictionary Sort Index file
+   */
+  protected String sortIndexFilePath;
+
+  /**
+   * Column sort info thrift instance.
+   */
+  ColumnSortInfo columnSortInfo = null;
+
+  /**
+   * LOGGER instance
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(CarbonDictionarySortIndexReaderImpl.class.getName());
+
+  /**
+   * dictionary sortIndex file Reader
+   */
+  private ThriftReader dictionarySortIndexThriftReader;
+
+  /**
+   * @param carbonTableIdentifier Carbon Table identifier holding the database name and table name
+   * @param columnIdentifier      column identifier
+   * @param carbonStorePath       carbon store path
+   */
+  public CarbonDictionarySortIndexReaderImpl(final CarbonTableIdentifier carbonTableIdentifier,
+      final ColumnIdentifier columnIdentifier, final String carbonStorePath) {
+    this.carbonTableIdentifier = carbonTableIdentifier;
+    this.columnIdentifier = columnIdentifier;
+    this.carbonStorePath = carbonStorePath;
+  }
+
+  /**
+   * Reads the carbon dictionary sort index data
+   * from the column's sortIndex file.
+   *
+   * @return the list of dictionary sort index values;
+   * if the column has no members, an empty list is returned
+   * @throws IOException In case any I/O error occurs
+   */
+  @Override public List<Integer> readSortIndex() throws IOException {
+    if (null == columnSortInfo) {
+      readColumnSortInfo();
+    }
+    return columnSortInfo.getSort_index();
+  }
+
+  /**
+   * Reads the column sort info (sort index and inverted sort index)
+   * from the column's sortIndex file.
+   *
+   * @throws IOException In case any I/O error occurs
+   */
+  private void readColumnSortInfo() throws IOException {
+    init();
+    try {
+      columnSortInfo = (ColumnSortInfo) dictionarySortIndexThriftReader.read();
+    } catch (IOException ie) {
+      LOGGER.error(ie, "problem while reading the column sort info.");
+      throw new IOException("problem while reading the column sort info.", ie);
+    } finally {
+      if (null != dictionarySortIndexThriftReader) {
+        dictionarySortIndexThriftReader.close();
+      }
+    }
+  }
+
+  /**
+   * Reads the carbon dictionary inverted sort index data
+   * from the column's sortIndex file.
+   *
+   * @return the list of dictionary inverted sort index values
+   * @throws IOException In case any I/O error occurs
+   */
+  @Override public List<Integer> readInvertedSortIndex() throws IOException {
+    if (null == columnSortInfo) {
+      readColumnSortInfo();
+    }
+    return columnSortInfo.getSort_index_inverted();
+  }
+
+  /**
+   * Initializes the dictionary sort index file path
+   * and opens the thrift reader for the dictionary sortIndex file.
+   *
+   * @throws IOException if any I/O error occurs
+   */
+  private void init() throws IOException {
+    initPath();
+    openThriftReader();
+  }
+
+  protected void initPath() {
+    PathService pathService = CarbonCommonFactory.getPathService();
+    CarbonTablePath carbonTablePath =
+        pathService.getCarbonTablePath(columnIdentifier, carbonStorePath, carbonTableIdentifier);
+    try {
+      CarbonDictionaryColumnMetaChunk chunkMetaObjectForLastSegmentEntry =
+          getChunkMetaObjectForLastSegmentEntry();
+      long dictOffset = chunkMetaObjectForLastSegmentEntry.getEnd_offset();
+      this.sortIndexFilePath =
+          carbonTablePath.getSortIndexFilePath(columnIdentifier.getColumnId(), dictOffset);
+      if (!FileFactory
+          .isFileExist(this.sortIndexFilePath, FileFactory.getFileType(this.sortIndexFilePath))) {
+        this.sortIndexFilePath =
+            carbonTablePath.getSortIndexFilePath(columnIdentifier.getColumnId());
+      }
+    } catch (IOException e) {
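+      // Fallback: if the dictionary metadata cannot be read, use the
+      // sort index file name without the dictionary offset.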
+      this.sortIndexFilePath = carbonTablePath.getSortIndexFilePath(columnIdentifier.getColumnId());
+    }
+
+  }
+
+  /**
+   * This method will read the dictionary chunk metadata thrift object for last entry
+   *
+   * @return last entry of dictionary meta chunk
+   * @throws IOException if an I/O error occurs
+   */
+  private CarbonDictionaryColumnMetaChunk getChunkMetaObjectForLastSegmentEntry()
+      throws IOException {
+    CarbonDictionaryMetadataReader columnMetadataReaderImpl = getDictionaryMetadataReader();
+    try {
+      // read the last segment entry for dictionary metadata
+      return columnMetadataReaderImpl.readLastEntryOfDictionaryMetaChunk();
+    } finally {
+      // Close metadata reader
+      columnMetadataReaderImpl.close();
+    }
+  }
+
+  /**
+   * @return a reader for the column's dictionary metadata
+   */
+  protected CarbonDictionaryMetadataReader getDictionaryMetadataReader() {
+    return new CarbonDictionaryMetadataReaderImpl(carbonStorePath, carbonTableIdentifier,
+        columnIdentifier);
+  }
+
+  /**
+   * This method will open the dictionary sort index file stream for reading
+   *
+   * @throws IOException in case any I/O error occurs
+   */
+  private void openThriftReader() throws IOException {
+    this.dictionarySortIndexThriftReader =
+        new ThriftReader(this.sortIndexFilePath, new ThriftReader.TBaseCreator() {
+          @Override public TBase create() {
+            return new ColumnSortInfo();
+          }
+        });
+    dictionarySortIndexThriftReader.open();
+  }
+
+  /**
+   * Closes this stream and releases any system resources associated
+   * with it. If the stream is already closed then invoking this
+   * method has no effect.
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  @Override public void close() throws IOException {
+    if (null != dictionarySortIndexThriftReader) {
+      dictionarySortIndexThriftReader.close();
+    }
+  }
+}

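A minimal usage sketch of the reader above; the identifiers and store path are placeholders supplied by the caller, and only APIs introduced in this commit are used:

    import java.io.IOException;
    import java.util.List;

    import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
    import org.apache.carbondata.core.carbon.ColumnIdentifier;
    import org.apache.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReader;
    import org.apache.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReaderImpl;

    public final class SortIndexReadExample {
      // Reads the sort index for one column and makes sure the reader is closed.
      public static List<Integer> readSortIndex(CarbonTableIdentifier tableId,
          ColumnIdentifier columnId, String storePath) throws IOException {
        CarbonDictionarySortIndexReader reader =
            new CarbonDictionarySortIndexReaderImpl(tableId, columnId, storePath);
        try {
          return reader.readSortIndex();
        } finally {
          reader.close();
        }
      }
    }
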
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/service/ColumnUniqueIdService.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/service/ColumnUniqueIdService.java b/core/src/main/java/org/apache/carbondata/core/service/ColumnUniqueIdService.java
new file mode 100644
index 0000000..1c97082
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/service/ColumnUniqueIdService.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.service;
+
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
+
+/**
+ * Column Unique id generator
+ */
+public interface ColumnUniqueIdService {
+
+  /**
+   * @param databaseName database the column belongs to
+   * @param columnSchema schema of the column
+   * @return generated unique column id
+   */
+  public String generateUniqueId(String databaseName, ColumnSchema columnSchema);
+}

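A minimal, hypothetical implementation sketch of this interface; the UUID strategy is an assumption for illustration, not necessarily the generator CarbonData ships:

    import java.util.UUID;

    import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
    import org.apache.carbondata.core.service.ColumnUniqueIdService;

    public class UuidColumnUniqueIdService implements ColumnUniqueIdService {
      @Override public String generateUniqueId(String databaseName, ColumnSchema columnSchema) {
        // Assumption: a random UUID is unique enough; a real implementation
        // might instead derive the id from databaseName and the column schema.
        return UUID.randomUUID().toString();
      }
    }
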
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/service/DictionaryService.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/service/DictionaryService.java b/core/src/main/java/org/apache/carbondata/core/service/DictionaryService.java
new file mode 100644
index 0000000..9b9ade6
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/service/DictionaryService.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.service;
+
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.carbon.ColumnIdentifier;
+import org.apache.carbondata.core.reader.CarbonDictionaryMetadataReader;
+import org.apache.carbondata.core.reader.CarbonDictionaryReader;
+import org.apache.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReader;
+import org.apache.carbondata.core.writer.CarbonDictionaryWriter;
+import org.apache.carbondata.core.writer.sortindex.CarbonDictionarySortIndexWriter;
+
+/**
+ * Dictionary service to get writer and reader
+ */
+public interface DictionaryService {
+
+  /**
+   * Gets a dictionary writer for the given column.
+   *
+   * @param carbonTableIdentifier table identifier
+   * @param columnIdentifier      column identifier
+   * @param carbonStorePath       carbon store path
+   * @return dictionary writer
+   */
+  public CarbonDictionaryWriter getDictionaryWriter(CarbonTableIdentifier carbonTableIdentifier,
+      ColumnIdentifier columnIdentifier, String carbonStorePath);
+
+  /**
+   * Gets a dictionary sort index writer for the given column.
+   *
+   * @param carbonTableIdentifier table identifier
+   * @param columnIdentifier      column identifier
+   * @param carbonStorePath       carbon store path
+   * @return dictionary sort index writer
+   */
+  public CarbonDictionarySortIndexWriter getDictionarySortIndexWriter(
+      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
+      String carbonStorePath);
+
+  /**
+   * Gets a dictionary metadata reader for the given column.
+   *
+   * @param carbonTableIdentifier table identifier
+   * @param columnIdentifier      column identifier
+   * @param carbonStorePath       carbon store path
+   * @return dictionary metadata reader
+   */
+  public CarbonDictionaryMetadataReader getDictionaryMetadataReader(
+      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
+      String carbonStorePath);
+
+  /**
+   * Gets a dictionary reader for the given column.
+   *
+   * @param carbonTableIdentifier table identifier
+   * @param columnIdentifier      column identifier
+   * @param carbonStorePath       carbon store path
+   * @return dictionary reader
+   */
+  public CarbonDictionaryReader getDictionaryReader(CarbonTableIdentifier carbonTableIdentifier,
+      ColumnIdentifier columnIdentifier, String carbonStorePath);
+
+  /**
+   * Gets a dictionary sort index reader for the given column.
+   *
+   * @param carbonTableIdentifier table identifier
+   * @param columnIdentifier      column identifier
+   * @param carbonStorePath       carbon store path
+   * @return dictionary sort index reader
+   */
+  public CarbonDictionarySortIndexReader getDictionarySortIndexReader(
+      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
+      String carbonStorePath);
+
+}

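A sketch of a caller going through the service rather than instantiating a reader directly; how the DictionaryService instance itself is obtained (e.g. from a factory) is left as an assumption:

    import java.io.IOException;
    import java.util.List;

    import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
    import org.apache.carbondata.core.carbon.ColumnIdentifier;
    import org.apache.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReader;
    import org.apache.carbondata.core.service.DictionaryService;

    public final class DictionaryServiceExample {
      public static List<Integer> readInvertedSortIndex(DictionaryService service,
          CarbonTableIdentifier tableId, ColumnIdentifier columnId, String storePath)
          throws IOException {
        CarbonDictionarySortIndexReader reader =
            service.getDictionarySortIndexReader(tableId, columnId, storePath);
        try {
          return reader.readInvertedSortIndex();
        } finally {
          reader.close();
        }
      }
    }
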
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/service/PathService.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/service/PathService.java b/core/src/main/java/org/apache/carbondata/core/service/PathService.java
new file mode 100644
index 0000000..d3295f5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/service/PathService.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.service;
+
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.carbon.ColumnIdentifier;
+import org.apache.carbondata.core.carbon.path.CarbonTablePath;
+
+/**
+ * Helper service to resolve table path details.
+ */
+public interface PathService {
+
+  /**
+   * @param columnIdentifier column identifier
+   * @param storeLocation    store location
+   * @param tableIdentifier  table identifier
+   * @return carbon table path for the given table
+   */
+  CarbonTablePath getCarbonTablePath(ColumnIdentifier columnIdentifier, String storeLocation,
+      CarbonTableIdentifier tableIdentifier);
+}

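A hypothetical implementation sketch; the <store>/<database>/<table> layout is an assumption mirroring getRelativeDictionaryDirectory() in CarbonTablePath later in this commit:

    import java.io.File;

    import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
    import org.apache.carbondata.core.carbon.ColumnIdentifier;
    import org.apache.carbondata.core.carbon.path.CarbonTablePath;
    import org.apache.carbondata.core.service.PathService;

    public class SimplePathService implements PathService {
      @Override public CarbonTablePath getCarbonTablePath(ColumnIdentifier columnIdentifier,
          String storeLocation, CarbonTableIdentifier tableIdentifier) {
        // Assumed layout: <store>/<database>/<table>
        String tablePath = storeLocation + File.separator
            + tableIdentifier.getDatabaseName() + File.separator
            + tableIdentifier.getTableName();
        return new CarbonTablePath(tableIdentifier, tablePath);
      }
    }
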
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java b/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
new file mode 100644
index 0000000..2f91d1e
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
@@ -0,0 +1,320 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.util;
+
+import java.lang.reflect.Field;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+
+/**
+ * Utility class for byte comparison.
+ */
+public final class ByteUtil {
+
+  private static final int SIZEOF_LONG = 8;
+
+  private ByteUtil() {
+
+  }
+
+  /**
+   * Compares two byte arrays byte by byte over their common length.
+   *
+   * @param buffer1 left operand
+   * @param buffer2 right operand
+   * @return 0 if the arrays match over the shorter length (a strict prefix
+   * compares as equal), otherwise the difference of the first differing bytes
+   */
+  public static int compare(byte[] buffer1, byte[] buffer2) {
+    // Short circuit equal case
+    if (buffer1 == buffer2) {
+      return 0;
+    }
+    // Bring WritableComparator code local
+    int i = 0;
+    int j = 0;
+    for (; i < buffer1.length && j < buffer2.length; i++, j++) {
+      int a = (buffer1[i] & 0xff);
+      int b = (buffer2[j] & 0xff);
+      if (a != b) {
+        return a - b;
+      }
+    }
+    return 0;
+  }
+
+  /**
+   * Converts a long[] to an int[]; each value is narrowed to int.
+   *
+   * @param longArray array to convert
+   * @return the converted int array
+   */
+  public static int[] convertToIntArray(long[] longArray) {
+    int[] intArray = new int[longArray.length];
+    for (int i = 0; i < longArray.length; i++) {
+      intArray[i] = (int) longArray[i];
+
+    }
+    return intArray;
+  }
+
+  /**
+   * Unsafe comparator
+   */
+  public enum UnsafeComparer {
+    /**
+     * instance.
+     */
+    INSTANCE;
+
+    /**
+     * The Unsafe instance.
+     */
+    static final sun.misc.Unsafe THEUNSAFE;
+
+    /**
+     * The offset to the first element in a byte array.
+     */
+    static final int BYTE_ARRAY_BASE_OFFSET;
+    static final boolean LITTLEENDIAN = ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN);
+
+    static {
+      THEUNSAFE = (sun.misc.Unsafe) AccessController.doPrivileged(new PrivilegedAction<Object>() {
+        @Override public Object run() {
+          try {
+            Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe");
+            f.setAccessible(true);
+            return f.get(null);
+          } catch (NoSuchFieldException e) {
+            // Unsafe is required here; fail class initialization
+            // if it cannot be obtained.
+            throw new Error();
+          } catch (IllegalAccessException e) {
+            throw new Error();
+          }
+        }
+      });
+
+      BYTE_ARRAY_BASE_OFFSET = THEUNSAFE.arrayBaseOffset(byte[].class);
+
+      // sanity check - this should never fail
+      if (THEUNSAFE.arrayIndexScale(byte[].class) != 1) {
+        throw new AssertionError();
+      }
+
+    }
+
+    /**
+     * Returns true if x1 is less than x2, when both values are treated as
+     * unsigned.
+     */
+    static boolean lessThanUnsigned(long x1, long x2) {
+      return (x1 + Long.MIN_VALUE) < (x2 + Long.MIN_VALUE);
+    }
+
+    /**
+     * Lexicographically compare two arrays.
+     *
+     * @param buffer1 left operand
+     * @param buffer2 right operand
+     * @param offset1 Where to start comparing in the left buffer
+     * @param offset2 Where to start comparing in the right buffer
+     * @param length1 How much to compare from the left buffer
+     * @param length2 How much to compare from the right buffer
+     * @return 0 if equal, < 0 if left is less than right, etc.
+     */
+    public int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2,
+        int length2) {
+      // Short circuit equal case
+      if (buffer1 == buffer2 && offset1 == offset2 && length1 == length2) {
+        return 0;
+      }
+      int minLength = Math.min(length1, length2);
+      int minWords = minLength / SIZEOF_LONG;
+      int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET;
+      int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET;
+
+      /*
+       * Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes
+       * at a time is no slower than comparing 4 bytes at a time even on
+       * 32-bit. On the other hand, it is substantially faster on 64-bit.
+       */
+      for (int i = 0; i < minWords * SIZEOF_LONG; i += SIZEOF_LONG) {
+        long lw = THEUNSAFE.getLong(buffer1, offset1Adj + (long) i);
+        long rw = THEUNSAFE.getLong(buffer2, offset2Adj + (long) i);
+        long diff = lw ^ rw;
+
+        if (diff != 0) {
+          if (!LITTLEENDIAN) {
+            return lessThanUnsigned(lw, rw) ? -1 : 1;
+          }
+
+          // Use binary search
+          int n = 0;
+          int y;
+          int x = (int) diff;
+          if (x == 0) {
+            x = (int) (diff >>> 32);
+            n = 32;
+          }
+
+          y = x << 16;
+          if (y == 0) {
+            n += 16;
+          } else {
+            x = y;
+          }
+
+          y = x << 8;
+          if (y == 0) {
+            n += 8;
+          }
+          return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL));
+        }
+      }
+
+      // The epilogue to cover the last (minLength % 8) elements.
+      for (int i = minWords * SIZEOF_LONG; i < minLength; i++) {
+        int a = (buffer1[offset1 + i] & 0xff);
+        int b = (buffer2[offset2 + i] & 0xff);
+        if (a != b) {
+          return a - b;
+        }
+      }
+      return length1 - length2;
+    }
+
+    public int compareTo(byte[] buffer1, byte[] buffer2) {
+
+      // Short circuit equal case
+      if (buffer1 == buffer2) {
+        return 0;
+      }
+      int len1 = buffer1.length;
+      int len2 = buffer2.length;
+      int minLength = (len1 <= len2) ? len1 : len2;
+      int minWords = 0;
+
+      /*
+       * Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes
+       * at a time is no slower than comparing 4 bytes at a time even on
+       * 32-bit. On the other hand, it is substantially faster on 64-bit.
+       */
+      if (minLength > 7) {
+        minWords = minLength / SIZEOF_LONG;
+        for (int i = 0; i < minWords * SIZEOF_LONG; i += SIZEOF_LONG) {
+          long lw = THEUNSAFE.getLong(buffer1, BYTE_ARRAY_BASE_OFFSET + (long) i);
+          long rw = THEUNSAFE.getLong(buffer2, BYTE_ARRAY_BASE_OFFSET + (long) i);
+          long diff = lw ^ rw;
+
+          if (diff != 0) {
+            if (!LITTLEENDIAN) {
+              return lessThanUnsigned(lw, rw) ? -1 : 1;
+            }
+
+            // Use binary search
+            int k = 0;
+            int y;
+            int x = (int) diff;
+            if (x == 0) {
+              x = (int) (diff >>> 32);
+              k = 32;
+            }
+            y = x << 16;
+            if (y == 0) {
+              k += 16;
+            } else {
+              x = y;
+            }
+
+            y = x << 8;
+            if (y == 0) {
+              k += 8;
+            }
+            return (int) (((lw >>> k) & 0xFFL) - ((rw >>> k) & 0xFFL));
+          }
+        }
+      }
+
+      // The epilogue to cover the last (minLength % 8) elements.
+      for (int i = minWords * SIZEOF_LONG; i < minLength; i++) {
+        int a = (buffer1[i] & 0xff);
+        int b = (buffer2[i] & 0xff);
+        if (a != b) {
+          return a - b;
+        }
+      }
+      return len1 - len2;
+    }
+
+    public boolean equals(byte[] buffer1, byte[] buffer2) {
+      if (buffer1.length != buffer2.length) {
+        return false;
+      }
+      int len = buffer1.length / 8;
+      long currentOffset = BYTE_ARRAY_BASE_OFFSET;
+      for (int i = 0; i < len; i++) {
+        long lw = THEUNSAFE.getLong(buffer1, currentOffset);
+        long rw = THEUNSAFE.getLong(buffer2, currentOffset);
+        if (lw != rw) {
+          return false;
+        }
+        currentOffset += 8;
+      }
+      len = buffer1.length % 8;
+      if (len > 0) {
+        for (int i = 0; i < len; i += 1) {
+          long lw = THEUNSAFE.getByte(buffer1, currentOffset);
+          long rw = THEUNSAFE.getByte(buffer2, currentOffset);
+          if (lw != rw) {
+            return false;
+          }
+          currentOffset += 1;
+        }
+      }
+      return true;
+    }
+
+    /**
+     * Compares two byte buffers; used in the data load sorting step.
+     * Note that both buffers are drained into temporary arrays.
+     *
+     * @param byteBuffer1 left operand
+     * @param byteBuffer2 right operand
+     * @return comparison result, as for compareTo(byte[], byte[])
+     */
+    public int compareTo(ByteBuffer byteBuffer1, ByteBuffer byteBuffer2) {
+
+      // Short circuit equal case
+      if (byteBuffer1 == byteBuffer2) {
+        return 0;
+      }
+      int len1 = byteBuffer1.remaining();
+      int len2 = byteBuffer2.remaining();
+      byte[] buffer1 = new byte[len1];
+      byte[] buffer2 = new byte[len2];
+      byteBuffer1.get(buffer1);
+      byteBuffer2.get(buffer2);
+      return compareTo(buffer1, buffer2);
+    }
+
+  }
+
+}

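A small self-contained sketch of the comparer in use; the string literals are arbitrary test data:

    import java.nio.charset.Charset;

    import org.apache.carbondata.core.util.ByteUtil;

    public final class ByteUtilExample {
      public static void main(String[] args) {
        byte[] a = "apple".getBytes(Charset.forName("UTF-8"));
        byte[] b = "apricot".getBytes(Charset.forName("UTF-8"));
        // Negative: "apple" sorts before "apricot" in unsigned byte order.
        System.out.println(ByteUtil.UnsafeComparer.INSTANCE.compareTo(a, b));
        // true: equal length and identical content.
        System.out.println(ByteUtil.UnsafeComparer.INSTANCE.equals(a, a.clone()));
      }
    }
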
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/CarbonFileFolderComparator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonFileFolderComparator.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonFileFolderComparator.java
new file mode 100644
index 0000000..c60865d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonFileFolderComparator.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.util;
+
+import java.util.Comparator;
+
+import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFile;
+
+public class CarbonFileFolderComparator implements Comparator<CarbonFile> {
+
+  /**
+   * Compares two files by the numeric suffix after the last '_' in their names.
+   *
+   * @param o1 first file
+   * @param o2 second file
+   * @return negative, zero or positive as the first suffix is less than,
+   *         equal to or greater than the second
+   */
+  @Override public int compare(CarbonFile o1, CarbonFile o2) {
+    String firstFileName = o1.getName();
+    String secondFileName = o2.getName();
+    int lastIndexOfO1 = firstFileName.lastIndexOf('_');
+    int lastIndexOfO2 = secondFileName.lastIndexOf('_');
+    int file1 = 0;
+    int file2 = 0;
+
+    try {
+      file1 = Integer.parseInt(firstFileName.substring(lastIndexOfO1 + 1));
+      file2 = Integer.parseInt(secondFileName.substring(lastIndexOfO2 + 1));
+    } catch (NumberFormatException e) {
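+      // Defensive fallback for a non-numeric suffix; such files sort first.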
+      return -1;
+    }
+    return (file1 < file2) ? -1 : (file1 == file2 ? 0 : 1);
+  }
+}

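This comparator orders files by the numeric suffix after the last underscore, so for example Segment_10 sorts after Segment_2, unlike plain lexicographic order. A usage sketch, where the files array is assumed to come from CarbonFile.listFiles(...):

    import java.util.Arrays;

    import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFile;
    import org.apache.carbondata.core.util.CarbonFileFolderComparator;

    public final class FolderSortExample {
      public static void sortByNumericSuffix(CarbonFile[] files) {
        // Sorts in place by the number following the last '_' in each name.
        Arrays.sort(files, new CarbonFileFolderComparator());
      }
    }
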
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsDummy.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsDummy.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsDummy.java
new file mode 100644
index 0000000..ac504f0
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsDummy.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.util;
+
+public class CarbonLoadStatisticsDummy implements LoadStatistics {
+  private CarbonLoadStatisticsDummy() {
+
+  }
+
+  private static CarbonLoadStatisticsDummy carbonLoadStatisticsDummyInstance =
+      new CarbonLoadStatisticsDummy();
+
+  public static CarbonLoadStatisticsDummy getInstance() {
+    return carbonLoadStatisticsDummyInstance;
+  }
+
+  @Override
+  public void initPartitonInfo(String partitionId) {
+
+  }
+
+  @Override
+  public void recordDicShuffleAndWriteTime() {
+
+  }
+
+  @Override
+  public void recordLoadCsvfilesToDfTime() {
+
+  }
+
+  @Override
+  public void recordDictionaryValuesTotalTime(String partitionID,
+      Long dictionaryValuesTotalTimeTimePoint) {
+
+  }
+
+  @Override
+  public void recordCsvInputStepTime(String partitionID, Long csvInputStepTimePoint) {
+
+  }
+
+  @Override
+  public void recordLruCacheLoadTime(double lruCacheLoadTime) {
+
+  }
+
+  @Override
+  public void recordGeneratingDictionaryValuesTime(String partitionID,
+      Long generatingDictionaryValuesTimePoint) {
+
+  }
+
+  @Override
+  public void recordSortRowsStepTotalTime(String partitionID, Long sortRowsStepTotalTimePoint) {
+
+  }
+
+  @Override
+  public void recordMdkGenerateTotalTime(String partitionID, Long mdkGenerateTotalTimePoint) {
+
+  }
+
+  @Override
+  public void recordDictionaryValue2MdkAdd2FileTime(String partitionID,
+      Long dictionaryValue2MdkAdd2FileTimePoint) {
+
+  }
+
+  @Override
+  public void recordTotalRecords(long totalRecords) {
+
+  }
+
+  @Override
+  public void recordHostBlockMap(String host, Integer numBlocks) {
+
+  }
+
+  @Override
+  public void recordPartitionBlockMap(String partitionID, Integer numBlocks) {
+
+  }
+
+  @Override
+  public void printStatisticsInfo(String partitionID) {
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
new file mode 100644
index 0000000..c9fc8ba
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
@@ -0,0 +1,413 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.util;
+
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+
+/**
+ * A utility that provides methods to record timing information during data loading.
+ */
+public class CarbonLoadStatisticsImpl implements LoadStatistics {
+  private CarbonLoadStatisticsImpl() {
+
+  }
+
+  private static CarbonLoadStatisticsImpl carbonLoadStatisticsImplInstance =
+          new CarbonLoadStatisticsImpl();
+
+  public static CarbonLoadStatisticsImpl getInstance() {
+    return carbonLoadStatisticsImplInstance;
+  }
+
+  private final LogService LOGGER =
+          LogServiceFactory.getLogService(CarbonLoadStatisticsImpl.class.getName());
+
+  /*
+   * We only care about the earliest start time (EST) and the latest end time
+   * (LET) across threads doing the same work; LET - EST is the elapsed time
+   * of that work when executed with multiple threads.
+   */
+  private long loadCsvfilesToDfStartTime = 0;
+  private long loadCsvfilesToDfCostTime = 0;
+  private long dicShuffleAndWriteFileTotalStartTime = 0;
+  private long dicShuffleAndWriteFileTotalCostTime = 0;
+
+  //LRU cache load one time
+  private double lruCacheLoadTime = 0;
+
+  //Generate surrogate keys total time for each partition:
+  private ConcurrentHashMap<String, Long[]> parDictionaryValuesTotalTimeMap =
+          new ConcurrentHashMap<String, Long[]>();
+  private ConcurrentHashMap<String, Long[]> parCsvInputStepTimeMap =
+          new ConcurrentHashMap<String, Long[]>();
+  private ConcurrentHashMap<String, Long[]> parGeneratingDictionaryValuesTimeMap =
+          new ConcurrentHashMap<String, Long[]>();
+
+  //Sort rows step total time for each partition:
+  private ConcurrentHashMap<String, Long[]> parSortRowsStepTotalTimeMap =
+          new ConcurrentHashMap<String, Long[]>();
+
+  //MDK generate total time for each partition:
+  private ConcurrentHashMap<String, Long[]> parMdkGenerateTotalTimeMap =
+          new ConcurrentHashMap<String, Long[]>();
+  private ConcurrentHashMap<String, Long[]> parDictionaryValue2MdkAdd2FileTime =
+          new ConcurrentHashMap<String, Long[]>();
+
+  //Node block process information
+  private ConcurrentHashMap<String, Integer> hostBlockMap =
+          new ConcurrentHashMap<String, Integer>();
+
+  //Partition block process information
+  private ConcurrentHashMap<String, Integer> partitionBlockMap =
+          new ConcurrentHashMap<String, Integer>();
+
+  private long totalRecords = 0;
+  private double totalTime = 0;
+
+  @Override
+  public void initPartitonInfo(String partitionId) {
+    parDictionaryValuesTotalTimeMap.put(partitionId, new Long[2]);
+    parCsvInputStepTimeMap.put(partitionId, new Long[2]);
+    parSortRowsStepTotalTimeMap.put(partitionId, new Long[2]);
+    parGeneratingDictionaryValuesTimeMap.put(partitionId, new Long[2]);
+    parMdkGenerateTotalTimeMap.put(partitionId, new Long[2]);
+    parDictionaryValue2MdkAdd2FileTime.put(partitionId, new Long[2]);
+  }
+
+  //Record the time
+  public void recordDicShuffleAndWriteTime() {
+    Long dicShuffleAndWriteTimePoint = System.currentTimeMillis();
+    if (0 == dicShuffleAndWriteFileTotalStartTime) {
+      dicShuffleAndWriteFileTotalStartTime = dicShuffleAndWriteTimePoint;
+    }
+    if (dicShuffleAndWriteTimePoint - dicShuffleAndWriteFileTotalStartTime >
+            dicShuffleAndWriteFileTotalCostTime) {
+      dicShuffleAndWriteFileTotalCostTime =
+          dicShuffleAndWriteTimePoint - dicShuffleAndWriteFileTotalStartTime;
+    }
+  }
+
+  public void recordLoadCsvfilesToDfTime() {
+    Long loadCsvfilesToDfTimePoint = System.currentTimeMillis();
+    if (0 == loadCsvfilesToDfStartTime) {
+      loadCsvfilesToDfStartTime = loadCsvfilesToDfTimePoint;
+    }
+    if (loadCsvfilesToDfTimePoint - loadCsvfilesToDfStartTime > loadCsvfilesToDfCostTime) {
+      loadCsvfilesToDfCostTime = loadCsvfilesToDfTimePoint - loadCsvfilesToDfStartTime;
+    }
+  }
+
+  public double getLruCacheLoadTime() {
+    return lruCacheLoadTime;
+  }
+
+  public void recordDictionaryValuesTotalTime(String partitionID,
+      Long dictionaryValuesTotalTimeTimePoint) {
+    if (null != parDictionaryValuesTotalTimeMap.get(partitionID)) {
+      if (null == parDictionaryValuesTotalTimeMap.get(partitionID)[0]) {
+        parDictionaryValuesTotalTimeMap.get(partitionID)[0] = dictionaryValuesTotalTimeTimePoint;
+      }
+      if (null == parDictionaryValuesTotalTimeMap.get(partitionID)[1] ||
+          dictionaryValuesTotalTimeTimePoint - parDictionaryValuesTotalTimeMap.get(partitionID)[0] >
+              parDictionaryValuesTotalTimeMap.get(partitionID)[1]) {
+        parDictionaryValuesTotalTimeMap.get(partitionID)[1] = dictionaryValuesTotalTimeTimePoint -
+            parDictionaryValuesTotalTimeMap.get(partitionID)[0];
+      }
+    }
+  }
+
+  public void recordCsvInputStepTime(String partitionID,
+      Long csvInputStepTimePoint) {
+    if (null != parCsvInputStepTimeMap.get(partitionID)) {
+      if (null == parCsvInputStepTimeMap.get(partitionID)[0]) {
+        parCsvInputStepTimeMap.get(partitionID)[0] = csvInputStepTimePoint;
+      }
+      if (null == parCsvInputStepTimeMap.get(partitionID)[1] ||
+              csvInputStepTimePoint - parCsvInputStepTimeMap.get(partitionID)[0] >
+                      parCsvInputStepTimeMap.get(partitionID)[1]) {
+        parCsvInputStepTimeMap.get(partitionID)[1] = csvInputStepTimePoint -
+                parCsvInputStepTimeMap.get(partitionID)[0];
+      }
+    }
+  }
+
+  public void recordLruCacheLoadTime(double lruCacheLoadTime) {
+    this.lruCacheLoadTime = lruCacheLoadTime;
+  }
+
+  public void recordGeneratingDictionaryValuesTime(String partitionID,
+      Long generatingDictionaryValuesTimePoint) {
+    if (null != parGeneratingDictionaryValuesTimeMap.get(partitionID)) {
+      if (null == parGeneratingDictionaryValuesTimeMap.get(partitionID)[0]) {
+        parGeneratingDictionaryValuesTimeMap.get(partitionID)[0] =
+                generatingDictionaryValuesTimePoint;
+      }
+      if (null == parGeneratingDictionaryValuesTimeMap.get(partitionID)[1] ||
+              generatingDictionaryValuesTimePoint - parGeneratingDictionaryValuesTimeMap
+                      .get(partitionID)[0] > parGeneratingDictionaryValuesTimeMap
+                      .get(partitionID)[1]) {
+        parGeneratingDictionaryValuesTimeMap.get(partitionID)[1] =
+                generatingDictionaryValuesTimePoint - parGeneratingDictionaryValuesTimeMap
+                        .get(partitionID)[0];
+      }
+    }
+  }
+
+  public void recordSortRowsStepTotalTime(String partitionID,
+                                          Long sortRowsStepTotalTimePoint) {
+    if (null != parSortRowsStepTotalTimeMap.get(partitionID)) {
+      if (null == parSortRowsStepTotalTimeMap.get(partitionID)[0]) {
+        parSortRowsStepTotalTimeMap.get(partitionID)[0] = sortRowsStepTotalTimePoint;
+      }
+      if (null == parSortRowsStepTotalTimeMap.get(partitionID)[1] ||
+              sortRowsStepTotalTimePoint - parSortRowsStepTotalTimeMap.get(partitionID)[0] >
+                      parSortRowsStepTotalTimeMap.get(partitionID)[1]) {
+        parSortRowsStepTotalTimeMap.get(partitionID)[1] = sortRowsStepTotalTimePoint -
+                parSortRowsStepTotalTimeMap.get(partitionID)[0];
+      }
+    }
+  }
+
+  public void recordMdkGenerateTotalTime(String partitionID,
+                                         Long mdkGenerateTotalTimePoint) {
+    if (null != parMdkGenerateTotalTimeMap.get(partitionID)) {
+      if (null == parMdkGenerateTotalTimeMap.get(partitionID)[0]) {
+        parMdkGenerateTotalTimeMap.get(partitionID)[0] = mdkGenerateTotalTimePoint;
+      }
+      if (null == parMdkGenerateTotalTimeMap.get(partitionID)[1] ||
+              mdkGenerateTotalTimePoint - parMdkGenerateTotalTimeMap.get(partitionID)[0] >
+                      parMdkGenerateTotalTimeMap.get(partitionID)[1]) {
+        parMdkGenerateTotalTimeMap.get(partitionID)[1] = mdkGenerateTotalTimePoint -
+                parMdkGenerateTotalTimeMap.get(partitionID)[0];
+      }
+    }
+  }
+
+  public void recordDictionaryValue2MdkAdd2FileTime(String partitionID,
+      Long dictionaryValue2MdkAdd2FileTimePoint) {
+    if (null != parDictionaryValue2MdkAdd2FileTime.get(partitionID)) {
+      if (null == parDictionaryValue2MdkAdd2FileTime.get(partitionID)[0]) {
+        parDictionaryValue2MdkAdd2FileTime.get(partitionID)[0] =
+                dictionaryValue2MdkAdd2FileTimePoint;
+      }
+      if (null == parDictionaryValue2MdkAdd2FileTime.get(partitionID)[1] ||
+              dictionaryValue2MdkAdd2FileTimePoint - parDictionaryValue2MdkAdd2FileTime
+                      .get(partitionID)[0] > parDictionaryValue2MdkAdd2FileTime
+                      .get(partitionID)[1]) {
+        parDictionaryValue2MdkAdd2FileTime.get(partitionID)[1] =
+                dictionaryValue2MdkAdd2FileTimePoint - parDictionaryValue2MdkAdd2FileTime
+                        .get(partitionID)[0];
+      }
+    }
+  }
+
+  //Record the node blocks information map
+  public void recordHostBlockMap(String host, Integer numBlocks) {
+    hostBlockMap.put(host, numBlocks);
+  }
+
+  //Record the partition blocks information map
+  public void recordPartitionBlockMap(String partitionID, Integer numBlocks) {
+    partitionBlockMap.put(partitionID, numBlocks);
+  }
+
+  public void recordTotalRecords(long totalRecords) {
+    this.totalRecords = totalRecords;
+  }
+
+  //Get the time
+  private double getDicShuffleAndWriteFileTotalTime() {
+    return dicShuffleAndWriteFileTotalCostTime / 1000.0;
+  }
+
+  private double getLoadCsvfilesToDfTime() {
+    return loadCsvfilesToDfCostTime / 1000.0;
+  }
+
+  private double getDictionaryValuesTotalTime(String partitionID) {
+    return parDictionaryValuesTotalTimeMap.get(partitionID)[1] / 1000.0;
+  }
+
+  private double getCsvInputStepTime(String partitionID) {
+    return parCsvInputStepTimeMap.get(partitionID)[1] / 1000.0;
+  }
+
+  private double getGeneratingDictionaryValuesTime(String partitionID) {
+    return parGeneratingDictionaryValuesTimeMap.get(partitionID)[1] / 1000.0;
+  }
+
+  private double getSortRowsStepTotalTime(String partitionID) {
+    return parSortRowsStepTotalTimeMap.get(partitionID)[1] / 1000.0;
+  }
+
+  private double getDictionaryValue2MdkAdd2FileTime(String partitionID) {
+    return parDictionaryValue2MdkAdd2FileTime.get(partitionID)[1] / 1000.0;
+  }
+
+  //Get the hostBlockMap
+  private ConcurrentHashMap<String, Integer> getHostBlockMap() {
+    return hostBlockMap;
+  }
+
+  //Get the partitionBlockMap
+  private ConcurrentHashMap<String, Integer> getPartitionBlockMap() {
+    return partitionBlockMap;
+  }
+
+  //Speed calculate
+  private long getTotalRecords() {
+    return this.totalRecords;
+  }
+
+  private int getLoadSpeed() {
+    return (int)(totalRecords / totalTime);
+  }
+
+  private int getGenDicSpeed() {
+    return (int) (totalRecords
+        / (getLoadCsvfilesToDfTime() + getDicShuffleAndWriteFileTotalTime()));
+  }
+
+  private int getReadCSVSpeed(String partitionID) {
+    return (int)(totalRecords / getCsvInputStepTime(partitionID));
+  }
+
+  private int getGenSurKeySpeed(String partitionID) {
+    return (int)(totalRecords / getGeneratingDictionaryValuesTime(partitionID));
+  }
+
+  private int getSortKeySpeed(String partitionID) {
+    return (int)(totalRecords / getSortRowsStepTotalTime(partitionID));
+  }
+
+  private int getMDKSpeed(String partitionID) {
+    return (int)(totalRecords / getDictionaryValue2MdkAdd2FileTime(partitionID));
+  }
+
+  private double getTotalTime(String partitionID) {
+    this.totalTime = getLoadCsvfilesToDfTime() + getDicShuffleAndWriteFileTotalTime() +
+        getLruCacheLoadTime() + getDictionaryValuesTotalTime(partitionID) +
+        getDictionaryValue2MdkAdd2FileTime(partitionID);
+    return totalTime;
+  }
+
+  //Print the statistics information
+  private void printDicGenStatisticsInfo() {
+    double loadCsvfilesToDfTime = getLoadCsvfilesToDfTime();
+    LOGGER.audit("STAGE 1 ->Load csv to DataFrame and generate" +
+            " block distinct values: " + loadCsvfilesToDfTime + "(s)");
+    double dicShuffleAndWriteFileTotalTime = getDicShuffleAndWriteFileTotalTime();
+    LOGGER.audit("STAGE 2 ->Global dict shuffle and write dict file: " +
+            + dicShuffleAndWriteFileTotalTime + "(s)");
+  }
+
+  private void printLruCacheLoadTimeInfo() {
+    LOGGER.audit("STAGE 3 ->LRU cache load: " + getLruCacheLoadTime() + "(s)");
+  }
+
+  private void printDictionaryValuesGenStatisticsInfo(String partitionID) {
+    double dictionaryValuesTotalTime = getDictionaryValuesTotalTime(partitionID);
+    LOGGER.audit("STAGE 4 ->Total cost of gen dictionary values, sort and write to temp files: "
+            + dictionaryValuesTotalTime + "(s)");
+    double csvInputStepTime = getCsvInputStepTime(partitionID);
+    double generatingDictionaryValuesTime = getGeneratingDictionaryValuesTime(partitionID);
+    LOGGER.audit("STAGE 4.1 ->  |_read csv file: " + csvInputStepTime + "(s)");
+    LOGGER.audit("STAGE 4.2 ->  |_transform to surrogate key: "
+            + generatingDictionaryValuesTime + "(s)");
+  }
+
+  private void printSortRowsStepStatisticsInfo(String partitionID) {
+    double sortRowsStepTotalTime = getSortRowsStepTotalTime(partitionID);
+    LOGGER.audit("STAGE 4.3 ->  |_sort rows and write to temp file: "
+            + sortRowsStepTotalTime + "(s)");
+  }
+
+  private void printGenMdkStatisticsInfo(String partitionID) {
+    double dictionaryValue2MdkAdd2FileTime = getDictionaryValue2MdkAdd2FileTime(partitionID);
+    LOGGER.audit("STAGE 5 ->Transform to MDK, compress and write fact files: "
+            + dictionaryValue2MdkAdd2FileTime + "(s)");
+  }
+
+  //Print the node blocks information
+  private void printHostBlockMapInfo() {
+    LOGGER.audit("========== BLOCK_INFO ==========");
+    if (getHostBlockMap().size() > 0) {
+      for (String host: getHostBlockMap().keySet()) {
+        LOGGER.audit("BLOCK_INFO ->Node host: " + host);
+        LOGGER.audit("BLOCK_INFO ->The block count in this node: " + getHostBlockMap().get(host));
+      }
+    } else if (getPartitionBlockMap().size() > 0) {
+      for (String parID: getPartitionBlockMap().keySet()) {
+        LOGGER.audit("BLOCK_INFO ->Partition ID: " + parID);
+        LOGGER.audit("BLOCK_INFO ->The block count in this partition: " +
+                getPartitionBlockMap().get(parID));
+      }
+    }
+  }
+
+  //Print the speed information
+  private void printLoadSpeedInfo(String partitionID) {
+    LOGGER.audit("===============Load_Speed_Info===============");
+    LOGGER.audit("Total Num of Records Processed: " + getTotalRecords());
+    LOGGER.audit("Total Time Cost: " + getTotalTime(partitionID) + "(s)");
+    LOGGER.audit("Total Load Speed: " + getLoadSpeed() + "records/s");
+    LOGGER.audit("Generate Dictionaries Speed: " + getGenDicSpeed() + "records/s");
+    LOGGER.audit("Read CSV Speed: " + getReadCSVSpeed(partitionID) + " records/s");
+    LOGGER.audit("Generate Surrogate Key Speed: " + getGenSurKeySpeed(partitionID) + " records/s");
+    LOGGER.audit("Sort Key/Write Temp Files Speed: " + getSortKeySpeed(partitionID) + " records/s");
+    LOGGER.audit("MDK Step Speed: " + getMDKSpeed(partitionID) + " records/s");
+    LOGGER.audit("=============================================");
+  }
+
+  public void printStatisticsInfo(String partitionID) {
+    try {
+      LOGGER.audit("========== TIME_STATISTICS PartitionID: " + partitionID + "==========");
+      printDicGenStatisticsInfo();
+      printLruCacheLoadTimeInfo();
+      printDictionaryValuesGenStatisticsInfo(partitionID);
+      printSortRowsStepStatisticsInfo(partitionID);
+      printGenMdkStatisticsInfo(partitionID);
+      printHostBlockMapInfo();
+      printLoadSpeedInfo(partitionID);
+    } catch (Exception e) {
+      LOGGER.audit("Can't print Statistics Information");
+    } finally {
+      resetLoadStatistics();
+    }
+  }
+
+  //Reset the load statistics values
+  private void resetLoadStatistics() {
+    loadCsvfilesToDfStartTime = 0;
+    loadCsvfilesToDfCostTime = 0;
+    dicShuffleAndWriteFileTotalStartTime = 0;
+    dicShuffleAndWriteFileTotalCostTime = 0;
+    lruCacheLoadTime = 0;
+    totalRecords = 0;
+    totalTime = 0;
+    parDictionaryValuesTotalTimeMap.clear();
+    parCsvInputStepTimeMap.clear();
+    parSortRowsStepTotalTimeMap.clear();
+    parGeneratingDictionaryValuesTimeMap.clear();
+    parMdkGenerateTotalTimeMap.clear();
+    parDictionaryValue2MdkAdd2FileTime.clear();
+  }
+
+}

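The record* methods above follow a two-call protocol: the first call for a partition stores the start time point, and a later call with a larger time point updates the elapsed time. A usage sketch with a hypothetical partition id:

    import org.apache.carbondata.core.util.CarbonLoadStatisticsImpl;
    import org.apache.carbondata.core.util.LoadStatistics;

    public final class LoadStatisticsExample {
      public static void main(String[] args) throws InterruptedException {
        LoadStatistics stats = CarbonLoadStatisticsImpl.getInstance();
        String partitionId = "0"; // hypothetical partition id
        stats.initPartitonInfo(partitionId);
        stats.recordCsvInputStepTime(partitionId, System.currentTimeMillis()); // start point
        Thread.sleep(100); // stand-in for the real CSV input step
        stats.recordCsvInputStepTime(partitionId, System.currentTimeMillis()); // end point
        stats.recordTotalRecords(1000L);
        stats.printStatisticsInfo(partitionId); // also resets the statistics
      }
    }
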


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/path/CarbonTablePath.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/path/CarbonTablePath.java b/core/src/main/java/org/apache/carbondata/core/carbon/path/CarbonTablePath.java
new file mode 100644
index 0000000..80a39f1
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/path/CarbonTablePath.java
@@ -0,0 +1,425 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.path;
+
+import java.io.File;
+
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFile;
+import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFileFilter;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+
+import static org.apache.carbondata.core.constants.CarbonCommonConstants.INVALID_SEGMENT_ID;
+
+import org.apache.hadoop.fs.Path;
+
+
+/**
+ * Helps to get Table content paths.
+ */
+public class CarbonTablePath extends Path {
+
+  protected static final String METADATA_DIR = "Metadata";
+  protected static final String DICTIONARY_EXT = ".dict";
+  protected static final String DICTIONARY_META_EXT = ".dictmeta";
+  protected static final String SORT_INDEX_EXT = ".sortindex";
+  protected static final String SCHEMA_FILE = "schema";
+  protected static final String TABLE_STATUS_FILE = "tablestatus";
+  protected static final String FACT_DIR = "Fact";
+  protected static final String AGGREGATE_TABLE_PREFIX = "Agg";
+  protected static final String SEGMENT_PREFIX = "Segment_";
+  protected static final String PARTITION_PREFIX = "Part";
+  protected static final String CARBON_DATA_EXT = ".carbondata";
+  protected static final String DATA_PART_PREFIX = "part";
+  protected static final String INDEX_FILE_EXT = ".carbonindex";
+
+  protected String tablePath;
+  protected CarbonTableIdentifier carbonTableIdentifier;
+
+  /**
+   * @param carbonTableIdentifier identifier of the carbon table
+   * @param tablePathString       physical path of the table
+   */
+  public CarbonTablePath(CarbonTableIdentifier carbonTableIdentifier, String tablePathString) {
+    super(tablePathString);
+    this.carbonTableIdentifier = carbonTableIdentifier;
+    this.tablePath = tablePathString;
+  }
+
+  /**
+   * Returns the folder path containing the given carbon file.
+   *
+   * @param carbonFilePath absolute path of a carbon file
+   * @return folder path containing the file
+   */
+  public static String getFolderContainingFile(String carbonFilePath) {
+    return carbonFilePath.substring(0, carbonFilePath.lastIndexOf(File.separator));
+  }
+
+  /**
+   * @param columnId unique column identifier
+   * @return name of dictionary file
+   */
+  public static String getDictionaryFileName(String columnId) {
+    return columnId + DICTIONARY_EXT;
+  }
+
+  /**
+   * Checks whether the given file is a dictionary file.
+   *
+   * @param carbonFile file to check
+   * @return true if the file is a dictionary file
+   */
+  public static Boolean isDictionaryFile(CarbonFile carbonFile) {
+    return (!carbonFile.isDirectory()) && (carbonFile.getName().endsWith(DICTIONARY_EXT));
+  }
+
+  /**
+   * Checks whether the given file name has the carbon data file extension.
+   *
+   * @param fileNameWithPath file name including path
+   * @return true if it is a carbon data file
+   */
+  public static boolean isCarbonDataFile(String fileNameWithPath) {
+    int pos = fileNameWithPath.lastIndexOf('.');
+    if (pos != -1) {
+      return fileNameWithPath.substring(pos).startsWith(CARBON_DATA_EXT);
+    }
+    return false;
+  }
+
+  /**
+   * Checks whether the given file name has the carbon index file extension.
+   *
+   * @param fileNameWithPath file name including path
+   * @return true if it is a carbon index file
+   */
+  public static boolean isCarbonIndexFile(String fileNameWithPath) {
+    int pos = fileNameWithPath.lastIndexOf('.');
+    if (pos != -1) {
+      return fileNameWithPath.substring(pos).startsWith(INDEX_FILE_EXT);
+    }
+    return false;
+  }
+
+  /**
+   * gets table path
+   */
+  public String getPath() {
+    return tablePath;
+  }
+
+  /**
+   * @param columnId unique column identifier
+   * @return absolute path of dictionary file
+   */
+  public String getDictionaryFilePath(String columnId) {
+    return getMetaDataDir() + File.separator + getDictionaryFileName(columnId);
+  }
+
+  /**
+   * @return the relative dictionary directory (<database>/<table>)
+   */
+  public String getRelativeDictionaryDirectory() {
+    return carbonTableIdentifier.getDatabaseName() + File.separator + carbonTableIdentifier
+        .getTableName();
+  }
+
+  /**
+   * Returns the metadata directory location for a table.
+   *
+   * @return metadata directory path
+   */
+  public String getMetadataDirectoryPath() {
+    return getMetaDataDir();
+  }
+
+  /**
+   * @param columnId unique column identifier
+   * @return absolute path of dictionary meta file
+   */
+  public String getDictionaryMetaFilePath(String columnId) {
+    return getMetaDataDir() + File.separator + columnId + DICTIONARY_META_EXT;
+  }
+
+  /**
+   * @param columnId unique column identifier
+   * @return absolute path of sort index file
+   */
+  public String getSortIndexFilePath(String columnId) {
+    return getMetaDataDir() + File.separator + columnId + SORT_INDEX_EXT;
+  }
+
+  /**
+   * @param columnId   unique column identifier
+   * @param dictOffset dictionary offset
+   * @return absolute path of the sort index file with the dictionary offset appended
+   */
+  public String getSortIndexFilePath(String columnId, long dictOffset) {
+    return getMetaDataDir() + File.separator + columnId + "_" + dictOffset + SORT_INDEX_EXT;
+  }
+
+  /**
+   * @return absolute path of schema file
+   */
+  public String getSchemaFilePath() {
+    return getMetaDataDir() + File.separator + SCHEMA_FILE;
+  }
+
+  /**
+   * @return absolute path of table status file
+   */
+  public String getTableStatusFilePath() {
+    return getMetaDataDir() + File.separator + TABLE_STATUS_FILE;
+  }
+
+  /**
+   * Gets absolute path of data file
+   *
+   * @param partitionId         unique partition identifier
+   * @param segmentId           unique segment identifier
+   * @param filePartNo          data file part number
+   * @param taskNo              task identifier
+   * @param factUpdateTimeStamp unique identifier to identify an update
+   * @return absolute path of data file stored in carbon data format
+   */
+  public String getCarbonDataFilePath(String partitionId, String segmentId, Integer filePartNo,
+      Integer taskNo, String factUpdateTimeStamp) {
+    return getSegmentDir(partitionId, segmentId) + File.separator + getCarbonDataFileName(
+        filePartNo, taskNo, factUpdateTimeStamp);
+  }
+
+  /**
+   * Below method will be used to get the index file present in the segment folder
+   * based on task id
+   *
+   * @param taskId      task id of the file
+   * @param partitionId partition number
+   * @param segmentId   segment number
+   * @return full qualified carbon index path
+   */
+  public String getCarbonIndexFilePath(final String taskId, final String partitionId,
+      final String segmentId) {
+    String segmentDir = getSegmentDir(partitionId, segmentId);
+    CarbonFile carbonFile =
+        FileFactory.getCarbonFile(segmentDir, FileFactory.getFileType(segmentDir));
+
+    CarbonFile[] files = carbonFile.listFiles(new CarbonFileFilter() {
+      @Override public boolean accept(CarbonFile file) {
+        return file.getName().startsWith(taskId) && file.getName().endsWith(INDEX_FILE_EXT);
+      }
+    });
+    return files[0].getAbsolutePath();
+  }
+
+  /**
+   * Gets absolute path of the segment data directory
+   *
+   * @param partitionId unique partition identifier
+   * @param segmentId   unique segment identifier
+   * @return absolute path of the directory holding carbon data files
+   */
+  public String getCarbonDataDirectoryPath(String partitionId, String segmentId) {
+    return getSegmentDir(partitionId, segmentId);
+  }
+
+  /**
+   * Gets absolute path of data file of given aggregate table
+   *
+   * @param aggTableID          unique aggregate table identifier
+   * @param partitionId         unique partition identifier
+   * @param segmentId           unique segment identifier
+   * @param filePartNo          data file part number
+   * @param taskNo              task identifier
+   * @param factUpdateTimeStamp unique identifier to identify an update
+   * @return absolute path of data file stored in carbon data format
+   */
+  public String getCarbonAggDataFilePath(String aggTableID, String partitionId, String segmentId,
+      Integer filePartNo, Integer taskNo, String factUpdateTimeStamp) {
+    return getAggSegmentDir(aggTableID, partitionId, segmentId) + File.separator
+        + getCarbonDataFileName(filePartNo, taskNo, factUpdateTimeStamp);
+  }
+
+  /**
+   * Gets data file name only, without path
+   *
+   * @param filePartNo          data file part number
+   * @param taskNo              task identifier
+   * @param factUpdateTimeStamp unique identifier to identify an update
+   * @return data file name only, without path
+   */
+  public String getCarbonDataFileName(Integer filePartNo, Integer taskNo,
+      String factUpdateTimeStamp) {
+    return DATA_PART_PREFIX + "-" + filePartNo + "-" + taskNo + "-" + factUpdateTimeStamp
+        + CARBON_DATA_EXT;
+  }
+
+  /**
+   * Below method will be used to get the carbon index filename
+   *
+   * @param taskNo               task number
+   * @param factUpdatedTimeStamp time stamp
+   * @return filename
+   */
+  public String getCarbonIndexFileName(int taskNo, String factUpdatedTimeStamp) {
+    return taskNo + "-" + factUpdatedTimeStamp + INDEX_FILE_EXT;
+  }
+
+  private String getSegmentDir(String partitionId, String segmentId) {
+    return getPartitionDir(partitionId) + File.separator + SEGMENT_PREFIX + segmentId;
+  }
+
+  public String getPartitionDir(String partitionId) {
+    return getFactDir() + File.separator + PARTITION_PREFIX + partitionId;
+  }
+
+  private String getAggSegmentDir(String aggTableID, String partitionId, String segmentId) {
+    return getAggPartitionDir(aggTableID, partitionId) + File.separator + SEGMENT_PREFIX
+        + segmentId;
+  }
+
+  private String getAggPartitionDir(String aggTableID, String partitionId) {
+    return getAggregateTableDir(aggTableID) + File.separator + PARTITION_PREFIX + partitionId;
+  }
+
+  private String getMetaDataDir() {
+    return tablePath + File.separator + METADATA_DIR;
+  }
+
+  public String getFactDir() {
+    return tablePath + File.separator + FACT_DIR;
+  }
+
+  private String getAggregateTableDir(String aggTableId) {
+    return tablePath + File.separator + AGGREGATE_TABLE_PREFIX + aggTableId;
+  }
+
+  @Override public boolean equals(Object o) {
+    if (!(o instanceof CarbonTablePath)) {
+      return false;
+    }
+    CarbonTablePath path = (CarbonTablePath) o;
+    return tablePath.equals(path.tablePath) && super.equals(o);
+  }
+
+  @Override public int hashCode() {
+    return super.hashCode() + tablePath.hashCode();
+  }
+
+  /**
+   * To manage data file name and composition
+   */
+  public static class DataFileUtil {
+
+    /**
+     * gets updated timestamp information from given carbon data file name
+     */
+    public static String getUpdateTimeStamp(String carbonDataFileName) {
+      // Get the file name from path
+      String fileName = getFileName(carbonDataFileName);
+      // timestamp starts after the third "-"; + 1 skips over the "-" itself
+      int firstDashPos = fileName.indexOf("-");
+      int secondDashPos = fileName.indexOf("-", firstDashPos + 1);
+      int startIndex = fileName.indexOf("-", secondDashPos + 1) + 1;
+      int endIndex = fileName.indexOf(".");
+      return fileName.substring(startIndex, endIndex);
+    }
+
+    /**
+     * gets file part number information from given carbon data file name
+     */
+    public static String getPartNo(String carbonDataFileName) {
+      // Get the file name from path
+      String fileName = getFileName(carbonDataFileName);
+      // + 1 for size of "-"
+      int startIndex = fileName.indexOf("-") + 1;
+      int endIndex = fileName.indexOf("-", startIndex);
+      return fileName.substring(startIndex, endIndex);
+    }
+
+    /**
+     * gets task number information from given carbon data file name
+     */
+    public static String getTaskNo(String carbonDataFileName) {
+      // Get the file name from path
+      String fileName = getFileName(carbonDataFileName);
+      // task number lies between the second and third "-"; + 1 skips the "-"
+      int firstDashPos = fileName.indexOf("-");
+      int startIndex = fileName.indexOf("-", firstDashPos + 1) + 1;
+      int endIndex = fileName.indexOf("-", startIndex);
+      return fileName.substring(startIndex, endIndex);
+    }
+
+    /**
+     * Gets the file name from file path
+     */
+    private static String getFileName(String carbonDataFileName) {
+      int endIndex = carbonDataFileName.lastIndexOf(CarbonCommonConstants.FILE_SEPARATOR);
+      if (endIndex > -1) {
+        return carbonDataFileName.substring(endIndex + 1, carbonDataFileName.length());
+      } else {
+        return carbonDataFileName;
+      }
+    }
+  }
+
+  /**
+   * To manage data path and composition
+   */
+  public static class DataPathUtil {
+
+    /**
+     * gets segment id from given absolute data file path
+     */
+    public static String getSegmentId(String dataFileAbsolutePath) {
+      // the segment directory is the parent directory of the data file
+      int endIndex = dataFileAbsolutePath.lastIndexOf(CarbonCommonConstants.FILE_SEPARATOR);
+      // + 1 for size of "/"
+      int startIndex =
+          dataFileAbsolutePath.lastIndexOf(CarbonCommonConstants.FILE_SEPARATOR, endIndex - 1) + 1;
+      String segmentDirStr = dataFileAbsolutePath.substring(startIndex, endIndex);
+      //identify id in segment_<id>
+      String[] segmentDirSplits = segmentDirStr.split("_");
+      try {
+        if (segmentDirSplits.length == 2) {
+          return segmentDirSplits[1];
+        }
+      } catch (Exception e) {
+        return INVALID_SEGMENT_ID;
+      }
+      return INVALID_SEGMENT_ID;
+    }
+  }
+
+  /**
+   * Below method will be used to get sort index file present in mentioned folder
+   *
+   * @param sortIndexDir   directory where sort index files reside
+   * @param columnUniqueId unique column identifier
+   * @return sort index carbon files
+   */
+  public CarbonFile[] getSortIndexFiles(CarbonFile sortIndexDir, final String columnUniqueId) {
+    CarbonFile[] files = sortIndexDir.listFiles(new CarbonFileFilter() {
+      @Override public boolean accept(CarbonFile file) {
+        return file.getName().startsWith(columnUniqueId) && file.getName().endsWith(SORT_INDEX_EXT);
+      }
+    });
+    return files;
+  }
+}
\ No newline at end of file
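
For illustration, a minimal sketch of how the path helpers above fit together,
assuming DATA_PART_PREFIX is "part" and CARBON_DATA_EXT is ".carbondata" (the
file name and timestamp below are made up). getCarbonDataFileName composes the
name and DataFileUtil recovers each field from it:

    // name layout: part-<filePartNo>-<taskNo>-<factUpdateTimeStamp>.carbondata
    String fileName = "part-0-1-1408088400000.carbondata";
    String partNo = CarbonTablePath.DataFileUtil.getPartNo(fileName);          // "0"
    String taskNo = CarbonTablePath.DataFileUtil.getTaskNo(fileName);          // "1"
    String stamp = CarbonTablePath.DataFileUtil.getUpdateTimeStamp(fileName);  // "1408088400000"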

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/querystatistics/QueryStatistic.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/querystatistics/QueryStatistic.java b/core/src/main/java/org/apache/carbondata/core/carbon/querystatistics/QueryStatistic.java
new file mode 100644
index 0000000..39198c8
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/querystatistics/QueryStatistic.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.querystatistics;
+
+import java.io.Serializable;
+
+/**
+ * Wrapper class to maintain the query statistics for each phase of the query
+ */
+public class QueryStatistic implements Serializable {
+
+  /**
+   * serialization id
+   */
+  private static final long serialVersionUID = -5667106646135905848L;
+
+  /**
+   * statistic message
+   */
+  private String message;
+
+  /**
+   * total time taken by the phase
+   */
+  private long timeTaken;
+
+  /**
+   * start time of the phase
+   */
+  private long startTime;
+
+  public QueryStatistic() {
+    this.startTime = System.currentTimeMillis();
+  }
+
+  /**
+   * below method will be used to add the statistic
+   *
+   * @param message     Statistic message
+   * @param currentTime current time
+   */
+  public void addStatistics(String message, long currentTime) {
+    this.timeTaken = currentTime - startTime;
+    this.message = message;
+  }
+
+  /**
+   * Below method will be used to add fixed time statistic.
+   * For example total time taken for scan or result preparation
+   *
+   * @param message   statistic message
+   * @param timetaken time taken by the phase, in milliseconds
+   */
+  public void addFixedTimeStatistic(String message, long timetaken) {
+    this.timeTaken = timetaken;
+    this.message = message;
+  }
+
+  /**
+   * Below method will be used to get the statistic message, which will
+   * be used to log
+   *
+   * @param queryWithTaskId query with task id to append in the message
+   * @return statistic message
+   */
+  public String getStatistics(String queryWithTaskId) {
+    return message + " for the taskid : " + queryWithTaskId + " is : " + timeTaken;
+  }
+}
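
A minimal usage sketch for this class; the phase names and the fixed time
below are hypothetical:

    // a statistic captures its start time at construction
    QueryStatistic scanStat = new QueryStatistic();
    // ... run the scan phase ...
    scanStat.addStatistics("Time taken for scan", System.currentTimeMillis());

    // when the elapsed time is already known, record it directly
    QueryStatistic resultStat = new QueryStatistic();
    resultStat.addFixedTimeStatistic("Time taken for result preparation", 42L);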

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/querystatistics/QueryStatisticsRecorder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/querystatistics/QueryStatisticsRecorder.java b/core/src/main/java/org/apache/carbondata/core/carbon/querystatistics/QueryStatisticsRecorder.java
new file mode 100644
index 0000000..ce12cae
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/querystatistics/QueryStatisticsRecorder.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.querystatistics;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+
+/**
+ * Class will be used to record and log the query statistics
+ */
+public class QueryStatisticsRecorder implements Serializable {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(QueryStatisticsRecorder.class.getName());
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = -5719752001674467864L;
+
+  /**
+   * list of statistics recording the time taken by each phase
+   * of the query, for example aggregation, scanning, block
+   * loading time etc.
+   */
+  private List<QueryStatistic> queryStatistics;
+
+  /**
+   * query id with task id
+   */
+  private String queryWithTask;
+
+  public QueryStatisticsRecorder(String queryId) {
+    queryStatistics = new ArrayList<QueryStatistic>();
+    this.queryWithTask = queryId;
+  }
+
+  /**
+   * Below method will be used to add the statistics
+   *
+   * @param statistic
+   */
+  public synchronized void recordStatistics(QueryStatistic statistic) {
+    queryStatistics.add(statistic);
+  }
+
+  /**
+   * Below method will be used to log the statistic
+   */
+  public void logStatistics() {
+    for (QueryStatistic statistic : queryStatistics) {
+      LOGGER.statistic(statistic.getStatistics(queryWithTask));
+    }
+  }
+}
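
Continuing the sketch above, statistics are collected by a recorder and logged
together once the query finishes (the query id string is hypothetical):

    QueryStatisticsRecorder recorder = new QueryStatisticsRecorder("query-1_task-0");
    recorder.recordStatistics(scanStat);
    recorder.recordStatistics(resultStat);
    recorder.logStatistics();  // one statistic log line per recorded phase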

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
new file mode 100644
index 0000000..4fa77ba
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -0,0 +1,892 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.constants;
+
+public final class CarbonCommonConstants {
+  /**
+   * integer size in bytes
+   */
+  public static final int INT_SIZE_IN_BYTE = 4;
+  /**
+   * short size in bytes
+   */
+  public static final int SHORT_SIZE_IN_BYTE = 2;
+  /**
+   * DOUBLE size in bytes
+   */
+  public static final int DOUBLE_SIZE_IN_BYTE = 8;
+  /**
+   * LONG size in bytes
+   */
+  public static final int LONG_SIZE_IN_BYTE = 8;
+  /**
+   * byte to KB conversion factor
+   */
+  public static final int BYTE_TO_KB_CONVERSION_FACTOR = 1024;
+  /**
+   * BYTE_ENCODING
+   */
+  public static final String BYTE_ENCODING = "ISO-8859-1";
+  /**
+   * measure meta data file name
+   */
+  public static final String MEASURE_METADATA_FILE_NAME = "/msrMetaData_";
+  /**
+   * location of the carbon member, hierarchy and fact files
+   */
+  public static final String STORE_LOCATION = "carbon.storelocation";
+  /**
+   * blocklet size in carbon file
+   */
+  public static final String BLOCKLET_SIZE = "carbon.blocklet.size";
+  /**
+   * TODO: max number of blocklets written in a single file?
+   */
+  public static final String MAX_FILE_SIZE = "carbon.max.file.size";
+  /**
+   * Number of cores to be used
+   */
+  public static final String NUM_CORES = "carbon.number.of.cores";
+  /**
+   * carbon sort size
+   */
+  public static final String SORT_SIZE = "carbon.sort.size";
+  /**
+   * default location of the carbon member, hierarchy and fact files
+   */
+  public static final String STORE_LOCATION_DEFAULT_VAL = "../carbon.store";
+  /**
+   * the folder name of kettle home path
+   */
+  public static final String KETTLE_HOME_NAME = "carbonplugins";
+  /**
+   * CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL
+   */
+  public static final int CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL = 10;
+  /**
+   * default blocklet size
+   */
+  public static final String BLOCKLET_SIZE_DEFAULT_VAL = "120000";
+  /**
+   * min blocklet size
+   */
+  public static final int BLOCKLET_SIZE_MIN_VAL = 50;
+  /**
+   * max blocklet size
+   */
+  public static final int BLOCKLET_SIZE_MAX_VAL = 12000000;
+  /**
+   * TODO: default value of max number of blocklet written in a single file?
+   */
+  public static final String MAX_FILE_SIZE_DEFAULT_VAL = "1024";
+  /**
+   * TODO: min value of max number of blocklets written in a single file?
+   */
+  public static final int MAX_FILE_SIZE_DEFAULT_VAL_MIN_VAL = 1;
+  /**
+   * max allowed block size for a file. If block size is greater than this value
+   * then the value is reset to default block size for a file
+   */
+  public static final int MAX_FILE_SIZE_DEFAULT_VAL_MAX_VAL = 2048;
+  /**
+   * default value of number of cores to be used
+   */
+  public static final String NUM_CORES_DEFAULT_VAL = "2";
+  /**
+   * min value of number of cores to be used
+   */
+  public static final int NUM_CORES_MIN_VAL = 1;
+  /**
+   * max value of number of cores to be used
+   */
+  public static final int NUM_CORES_MAX_VAL = 32;
+  /**
+   * default carbon sort size
+   */
+  public static final String SORT_SIZE_DEFAULT_VAL = "100000";
+  /**
+   * min carbon sort size
+   */
+  public static final int SORT_SIZE_MIN_VAL = 1000;
+  /**
+   * carbon properties file path
+   */
+  public static final String CARBON_PROPERTIES_FILE_PATH = "../../../conf/carbon.properties";
+  /**
+   * CARBON_DDL_BASE_HDFS_URL
+   */
+  public static final String CARBON_DDL_BASE_HDFS_URL = "carbon.ddl.base.hdfs.url";
+  /**
+   * Slice Meta data file.
+   */
+  public static final String SLICE_METADATA_FILENAME = "sliceMetaData";
+  /**
+   * Load Folder Name
+   */
+  public static final String LOAD_FOLDER = "Segment_";
+  /**
+   * RESTructure Folder
+   */
+  public static final String RESTRUCTRE_FOLDER = "RS_";
+  /**
+   * BYTEBUFFER_SIZE
+   */
+  public static final int BYTEBUFFER_SIZE = 24 * 1024;
+  /**
+   * Average constant
+   */
+  public static final String AVERAGE = "avg";
+  /**
+   * Count constant
+   */
+  public static final String COUNT = "count";
+  /**
+   * Count star constant
+   */
+  public static final String COUNT_STAR = "countstar";
+  /**
+   * Max constant
+   */
+  public static final String MAX = "max";
+  /**
+   * Min constant
+   */
+  public static final String MIN = "min";
+  /**
+   * distinct count
+   */
+  public static final String DISTINCT_COUNT = "distinct-count";
+  /**
+   * CUSTOM
+   */
+  public static final String CUSTOM = "custom";
+  /**
+   * SUM
+   */
+  public static final String SUM = "sum";
+  /**
+   * DUMMY aggregation function
+   */
+  public static final String DUMMY = "dummy";
+  /**
+   * MEMBER_DEFAULT_VAL
+   */
+  public static final String MEMBER_DEFAULT_VAL = "@NU#LL$!";
+  /**
+   * BLANK_LINE_FLAG
+   */
+  public static final String BLANK_LINE_FLAG = "@NU#LL$!BLANKLINE";
+  /**
+   * FILE STATUS IN-PROGRESS
+   */
+  public static final String FILE_INPROGRESS_STATUS = ".inprogress";
+  /**
+   * CARBON_BADRECORDS_LOCATION
+   */
+  public static final String CARBON_BADRECORDS_LOC = "carbon.badRecords.location";
+  /**
+   * CARBON_BADRECORDS_LOCATION_DEFAULT
+   */
+  public static final String CARBON_BADRECORDS_LOC_DEFAULT_VAL =
+      "../unibi-solutions/system/carbon/badRecords";
+  /**
+   * HIERARCHY_FILE_EXTENSION
+   */
+  public static final String HIERARCHY_FILE_EXTENSION = ".hierarchy";
+  /**
+   * SORT_TEMP_FILE_LOCATION
+   */
+  public static final String SORT_TEMP_FILE_LOCATION = "sortrowtmp";
+  /**
+   * LEVEL_FILE_EXTENSION
+   */
+  public static final String LEVEL_FILE_EXTENSION = ".level";
+  /**
+   * FACT_FILE_EXT
+   */
+  public static final String FACT_FILE_EXT = ".carbondata";
+  /**
+   * MEASUREMETADATA_FILE_EXT
+   */
+  public static final String MEASUREMETADATA_FILE_EXT = ".msrmetadata";
+  /**
+   * GRAPH_ROWSET_SIZE
+   */
+  public static final String GRAPH_ROWSET_SIZE = "carbon.graph.rowset.size";
+  /**
+   * GRAPH_ROWSET_SIZE_DEFAULT
+   */
+  public static final String GRAPH_ROWSET_SIZE_DEFAULT = "500";
+  /**
+   * Comment for <code>TYPE_MYSQL</code>
+   */
+  public static final String TYPE_MYSQL = "MYSQL";
+  /**
+   * Comment for <code>TYPE_MSSQL</code>
+   */
+  public static final String TYPE_MSSQL = "MSSQL";
+  /**
+   * Comment for <code>TYPE_ORACLE</code>
+   */
+  public static final String TYPE_ORACLE = "ORACLE";
+  /**
+   * Comment for <code>TYPE_SYBASE</code>
+   */
+  public static final String TYPE_SYBASE = "SYBASE";
+  /**
+   * SORT_INTERMEDIATE_FILES_LIMIT
+   */
+  public static final String SORT_INTERMEDIATE_FILES_LIMIT = "carbon.sort.intermediate.files.limit";
+  /**
+   * SORT_INTERMEDIATE_FILES_LIMIT_DEFAULT_VALUE
+   */
+  public static final String SORT_INTERMEDIATE_FILES_LIMIT_DEFAULT_VALUE = "20";
+  /**
+   * MERGERD_EXTENSION
+   */
+  public static final String MERGERD_EXTENSION = ".merge";
+  /**
+   * SORT_FILE_BUFFER_SIZE
+   */
+  public static final String SORT_FILE_BUFFER_SIZE = "carbon.sort.file.buffer.size";
+  /**
+   * number of records after which the data load counter is logged
+   */
+  public static final String DATA_LOAD_LOG_COUNTER = "carbon.load.log.counter";
+  /**
+   * DATA_LOAD_LOG_COUNTER_DEFAULT_COUNTER
+   */
+  public static final String DATA_LOAD_LOG_COUNTER_DEFAULT_COUNTER = "500000";
+  /**
+   * SORT_FILE_WRITE_BUFFER_SIZE
+   */
+  public static final String CARBON_SORT_FILE_WRITE_BUFFER_SIZE =
+      "carbon.sort.file.write.buffer.size";
+  /**
+   * SORT_FILE_WRITE_BUFFER_SIZE_DEFAULT_VALUE
+   */
+  public static final String CARBON_SORT_FILE_WRITE_BUFFER_SIZE_DEFAULT_VALUE = "50000";
+  /**
+   * Number of cores to be used while loading
+   */
+  public static final String NUM_CORES_LOADING = "carbon.number.of.cores.while.loading";
+  /**
+   * Number of cores to be used while compacting
+   */
+  public static final String NUM_CORES_COMPACTING = "carbon.number.of.cores.while.compacting";
+  /**
+   * Number of cores to be used for block sort
+   */
+  public static final String NUM_CORES_BLOCK_SORT = "carbon.number.of.cores.block.sort";
+  /**
+   * Default value of number of cores to be used for block sort
+   */
+  public static final String NUM_CORES_BLOCK_SORT_DEFAULT_VAL = "7";
+  /**
+   * Max value of number of cores to be used for block sort
+   */
+  public static final int NUM_CORES_BLOCK_SORT_MAX_VAL = 12;
+  /**
+   * Min value of number of cores to be used for block sort
+   */
+  public static final int NUM_CORES_BLOCK_SORT_MIN_VAL = 1;
+  /**
+   * CSV_READ_BUFFER_SIZE
+   */
+  public static final String CSV_READ_BUFFER_SIZE = "carbon.csv.read.buffersize.byte";
+  /**
+   * CSV_READ_BUFFER_SIZE_DEFAULT
+   */
+  public static final String CSV_READ_BUFFER_SIZE_DEFAULT = "50000";
+  /**
+   * DEFAULT_NUMBER_CORES
+   */
+  public static final String DEFAULT_NUMBER_CORES = "2";
+  /**
+   * CSV_FILE_EXTENSION
+   */
+  public static final String CSV_FILE_EXTENSION = ".csv";
+  /**
+   * COLON_SPC_CHARACTER
+   */
+  public static final String COLON_SPC_CHARACTER = ":!@#COLON#@!:";
+  /**
+   * HASH_SPC_CHARACTER
+   */
+  public static final String HASH_SPC_CHARACTER = "#!@:HASH:@!#";
+  /**
+   * SEMICOLON_SPC_CHARACTER
+   */
+  public static final String SEMICOLON_SPC_CHARACTER = ";#!@:SEMIC:@!#;";
+  /**
+   * AMPERSAND_SPC_CHARACTER
+   */
+  public static final String AMPERSAND_SPC_CHARACTER = "&#!@:AMPER:@!#&";
+  /**
+   * COMA_SPC_CHARACTER
+   */
+  public static final String COMA_SPC_CHARACTER = ",#!:COMA:!#,";
+  /**
+   * HYPHEN_SPC_CHARACTER
+   */
+  public static final String HYPHEN_SPC_CHARACTER = "-#!:HYPHEN:!#-";
+  /**
+   * CARBON_DECIMAL_POINTERS_DEFAULT
+   */
+  public static final byte CARBON_DECIMAL_POINTERS_DEFAULT = 5;
+  /**
+   * SORT_TEMP_FILE_EXT
+   */
+  public static final String SORT_TEMP_FILE_EXT = ".sorttemp";
+  /**
+   * CARBON_MERGE_SORT_READER_THREAD
+   */
+  public static final String CARBON_MERGE_SORT_READER_THREAD = "carbon.merge.sort.reader.thread";
+  /**
+   * CARBON_MERGE_SORT_READER_THREAD_DEFAULTVALUE
+   */
+  public static final String CARBON_MERGE_SORT_READER_THREAD_DEFAULTVALUE = "3";
+  /**
+   * IS_SORT_TEMP_FILE_COMPRESSION_ENABLED
+   */
+  public static final String IS_SORT_TEMP_FILE_COMPRESSION_ENABLED =
+      "carbon.is.sort.temp.file.compression.enabled";
+  /**
+   * IS_SORT_TEMP_FILE_COMPRESSION_ENABLED_DEFAULTVALUE
+   */
+  public static final String IS_SORT_TEMP_FILE_COMPRESSION_ENABLED_DEFAULTVALUE = "false";
+  /**
+   * SORT_TEMP_FILE_NO_OF_RECORDS_FOR_COMPRESSION
+   */
+  public static final String SORT_TEMP_FILE_NO_OF_RECORDS_FOR_COMPRESSION =
+      "carbon.sort.temp.file.no.of.records.for.compression";
+  /**
+   * SORT_TEMP_FILE_NO_OF_RECORD_FOR_COMPRESSION_DEFAULTVALUE
+   */
+  public static final String SORT_TEMP_FILE_NO_OF_RECORD_FOR_COMPRESSION_DEFAULTVALUE = "50";
+  /**
+   * DEFAULT_COLLECTION_SIZE
+   */
+  public static final int DEFAULT_COLLECTION_SIZE = 16;
+  /**
+   * CARBON_TIMESTAMP_DEFAULT_FORMAT
+   */
+  public static final String CARBON_TIMESTAMP_DEFAULT_FORMAT = "yyyy-MM-dd HH:mm:ss";
+  /**
+   * CARBON_TIMESTAMP_FORMAT
+   */
+  public static final String CARBON_TIMESTAMP_FORMAT = "carbon.timestamp.format";
+  /**
+   * STORE_LOCATION_HDFS
+   */
+  public static final String STORE_LOCATION_HDFS = "carbon.storelocation.hdfs";
+  /**
+   * STORE_LOCATION_TEMP_PATH
+   */
+  public static final String STORE_LOCATION_TEMP_PATH = "carbon.tempstore.location";
+  /**
+   * IS_COLUMNAR_STORAGE_DEFAULTVALUE
+   */
+  public static final String IS_COLUMNAR_STORAGE_DEFAULTVALUE = "true";
+  /**
+   * DIMENSION_SPLIT_VALUE_IN_COLUMNAR_DEFAULTVALUE
+   */
+  public static final String DIMENSION_SPLIT_VALUE_IN_COLUMNAR_DEFAULTVALUE = "1";
+  /**
+   * IS_FULLY_FILLED_BITS_DEFAULT_VALUE
+   */
+  public static final String IS_FULLY_FILLED_BITS_DEFAULT_VALUE = "true";
+  /**
+   * AGGREAGATE_COLUMNAR_KEY_BLOCK
+   */
+  public static final String AGGREAGATE_COLUMNAR_KEY_BLOCK = "aggregate.columnar.keyblock";
+  /**
+   * AGGREAGATE_COLUMNAR_KEY_BLOCK_DEFAULTVALUE
+   */
+  public static final String AGGREAGATE_COLUMNAR_KEY_BLOCK_DEFAULTVALUE = "true";
+  /**
+   * ENABLE_DATA_LOADING_STATISTICS
+   */
+  public static final String ENABLE_DATA_LOADING_STATISTICS = "enable.data.loading.statistics";
+  /**
+   * ENABLE_DATA_LOADING_STATISTICS_DEFAULT
+   */
+  public static final String ENABLE_DATA_LOADING_STATISTICS_DEFAULT = "false";
+  /**
+   * HIGH_CARDINALITY_VALUE
+   */
+  public static final String HIGH_CARDINALITY_VALUE = "high.cardinality.value";
+  /**
+   * HIGH_CARDINALITY_VALUE_DEFAULTVALUE
+   */
+  public static final String HIGH_CARDINALITY_VALUE_DEFAULTVALUE = "100000";
+  /**
+   * CONSTANT_SIZE_TEN
+   */
+  public static final int CONSTANT_SIZE_TEN = 10;
+  /**
+   * LEVEL_METADATA_FILE
+   */
+  public static final String LEVEL_METADATA_FILE = "levelmetadata_";
+  public static final String ENABLE_BASE64_ENCODING = "enable.base64.encoding";
+  public static final String ENABLE_BASE64_ENCODING_DEFAULT = "false";
+  /**
+   * LOAD_STATUS SUCCESS
+   */
+  public static final String STORE_LOADSTATUS_SUCCESS = "Success";
+  /**
+   * LOAD_STATUS FAILURE
+   */
+  public static final String STORE_LOADSTATUS_FAILURE = "Failure";
+  /**
+   * LOAD_STATUS PARTIAL_SUCCESS
+   */
+  public static final String STORE_LOADSTATUS_PARTIAL_SUCCESS = "Partial Success";
+  /**
+   * CARBON_METADATA_EXTENSION
+   */
+  public static final String CARBON_METADATA_EXTENSION = ".metadata";
+  /**
+   * CARBON_DEFAULT_STREAM_ENCODEFORMAT
+   */
+  public static final String CARBON_DEFAULT_STREAM_ENCODEFORMAT = "UTF-8";
+  /**
+   * AGGREGATE_TABLE_START_TAG
+   */
+  public static final String AGGREGATE_TABLE_START_TAG = "agg";
+  /**
+   * COMMA
+   */
+  public static final String COMMA = ",";
+  /**
+   * UNDERSCORE
+   */
+  public static final String UNDERSCORE = "_";
+  /**
+   * POINT
+   */
+  public static final String POINT = ".";
+  /**
+   * File separator
+   */
+  public static final String FILE_SEPARATOR = "/";
+  /**
+   * MAX_QUERY_EXECUTION_TIME
+   */
+  public static final String MAX_QUERY_EXECUTION_TIME = "max.query.execution.time";
+  /**
+   * CARBON_TIMESTAMP
+   */
+  public static final String CARBON_TIMESTAMP = "dd-MM-yyyy HH:mm:ss";
+  /**
+   * METADATA_LOCK
+   */
+  public static final String METADATA_LOCK = "meta.lock";
+  /**
+   * NUMBER_OF_TRIES_FOR_LOAD_METADATA_LOCK_DEFAULT
+   */
+  public static final int NUMBER_OF_TRIES_FOR_LOAD_METADATA_LOCK_DEFAULT = 3;
+  /**
+   * MAX_TIMEOUT_FOR_LOAD_METADATA_LOCK_DEFAULT
+   */
+  public static final int MAX_TIMEOUT_FOR_LOAD_METADATA_LOCK_DEFAULT = 5;
+  /**
+   * NUMBER_OF_TRIES_FOR_LOAD_METADATA_LOCK
+   */
+  public static final String NUMBER_OF_TRIES_FOR_LOAD_METADATA_LOCK =
+      "carbon.load.metadata.lock.retries";
+  /**
+   * MAX_TIMEOUT_FOR_LOAD_METADATA_LOCK
+   */
+  public static final String MAX_TIMEOUT_FOR_LOAD_METADATA_LOCK =
+      "carbon.load.metadata.lock.retry.timeout.sec";
+  /**
+   * MARKED_FOR_DELETION
+   */
+  public static final String MARKED_FOR_DELETE = "Marked for Delete";
+  public static final String MARKED_FOR_UPDATE = "Marked for Update";
+  public static final String STRING_TYPE = "StringType";
+  public static final String INTEGER_TYPE = "IntegerType";
+  public static final String LONG_TYPE = "LongType";
+  public static final String DOUBLE_TYPE = "DoubleType";
+  public static final String FLOAT_TYPE = "FloatType";
+  public static final String DATE_TYPE = "DateType";
+  public static final String BOOLEAN_TYPE = "BooleanType";
+  public static final String TIMESTAMP_TYPE = "TimestampType";
+  public static final String BYTE_TYPE = "ByteType";
+  public static final String SHORT_TYPE = "ShortType";
+  public static final String BINARY_TYPE = "BinaryType";
+  public static final String DECIMAL_TYPE = "DecimalType";
+  public static final String STRING = "String";
+  public static final String COLUMNAR = "columnar";
+
+  public static final String INTEGER = "Integer";
+  public static final String SHORT = "Short";
+  public static final String NUMERIC = "Numeric";
+  public static final String TIMESTAMP = "Timestamp";
+  public static final String ARRAY = "ARRAY";
+  public static final String STRUCT = "STRUCT";
+  public static final String INCLUDE = "include";
+  public static final String FROM = "from";
+  public static final String WITH = "with";
+  /**
+   * FACT_UPDATE_EXTENSION.
+   */
+  public static final String FACT_UPDATE_EXTENSION = ".carbondata_update";
+  public static final String FACT_DELETE_EXTENSION = "_delete";
+  /**
+   * FACT_FILE_UPDATED
+   */
+  public static final String FACT_FILE_UPDATED = "update";
+  /**
+   * DEFAULT_MAX_QUERY_EXECUTION_TIME
+   */
+  public static final int DEFAULT_MAX_QUERY_EXECUTION_TIME = 60;
+  /**
+   * LOADMETADATA_FILENAME
+   */
+  public static final String LOADMETADATA_FILENAME = "tablestatus";
+  public static final String SUM_DISTINCT = "sum-distinct";
+  /**
+   * INMEMORY_REOCRD_SIZE
+   */
+  public static final String INMEMORY_REOCRD_SIZE = "carbon.inmemory.record.size";
+  public static final int INMEMORY_REOCRD_SIZE_DEFAULT = 240000;
+
+  /**
+   * DETAIL_QUERY_BATCH_SIZE
+   */
+  public static final String DETAIL_QUERY_BATCH_SIZE = "carbon.detail.batch.size";
+  public static final int DETAIL_QUERY_BATCH_SIZE_DEFAULT = 10000;
+  /**
+   * SCHEMAS_MODIFIED_TIME_FILE
+   */
+  public static final String SCHEMAS_MODIFIED_TIME_FILE = "modifiedTime.mdt";
+  public static final String DEFAULT_INVISIBLE_DUMMY_MEASURE = "default_dummy_measure";
+  /**
+   * max level cache size up to which level cache will be loaded in memory
+   */
+  public static final String CARBON_MAX_LEVEL_CACHE_SIZE = "carbon.max.level.cache.size";
+  /**
+   * max level cache size default value in GB
+   */
+  public static final String CARBON_MAX_LEVEL_CACHE_SIZE_DEFAULT = "-1";
+  /**
+   * DOUBLE_VALUE_MEASURE
+   */
+  public static final char SUM_COUNT_VALUE_MEASURE = 'n';
+  /**
+   * BYTE_VALUE_MEASURE
+   */
+  public static final char BYTE_VALUE_MEASURE = 'c';
+  /**
+   * BIG_DECIMAL_MEASURE
+   */
+  public static final char BIG_DECIMAL_MEASURE = 'b';
+
+  /**
+   * BIG_INT_MEASURE
+   */
+  public static final char BIG_INT_MEASURE = 'l';
+
+  /**
+   * This determines the size of the array to be processed in data load steps:
+   * one for dimensions, one for ignore-dictionary dimensions, one for measures.
+   */
+  public static final int ARRAYSIZE = 3;
+  /**
+   * CARBON_PREFETCH_BUFFERSIZE
+   */
+  public static final int CARBON_PREFETCH_BUFFERSIZE = 20000;
+  /**
+   * CARBON_PREFETCH_IN_MERGE
+   */
+  public static final boolean CARBON_PREFETCH_IN_MERGE_VALUE = false;
+  /**
+   * TEMPWRITEFILEEXTENSION
+   */
+  public static final String TEMPWRITEFILEEXTENSION = ".write";
+  /**
+   * ENABLE_AUTO_LOAD_MERGE
+   */
+  public static final String ENABLE_AUTO_LOAD_MERGE = "carbon.enable.auto.load.merge";
+  /**
+   * DEFAULT_ENABLE_AUTO_LOAD_MERGE
+   */
+  public static final String DEFAULT_ENABLE_AUTO_LOAD_MERGE = "false";
+
+  /**
+   * LOCK_TYPE property to configure the locking mechanism used by carbon,
+   * e.g. LOCALLOCK, ZOOKEEPERLOCK or HDFSLOCK
+   */
+  public static final String LOCK_TYPE = "carbon.lock.type";
+
+  /**
+   * LOCK_TYPE_DEFAULT the default locking mechanism of carbon is local locking
+   */
+  public static final String LOCK_TYPE_DEFAULT = "LOCALLOCK";
+
+  /**
+   * ZOOKEEPER_LOCATION the location in the zookeeper file system where locks are created
+   */
+  public static final String ZOOKEEPER_LOCATION = "/CarbonLocks";
+
+  /**
+   * maximum dictionary chunk size that can be kept in memory while writing dictionary file
+   */
+  public static final String DICTIONARY_ONE_CHUNK_SIZE = "carbon.dictionary.chunk.size";
+
+  /**
+   * dictionary chunk default size
+   */
+  public static final String DICTIONARY_ONE_CHUNK_SIZE_DEFAULT = "10000";
+
+  /**
+   * xxhash algorithm property for hashmap
+   */
+  public static final String ENABLE_XXHASH = "carbon.enableXXHash";
+
+  /**
+   * xxhash algorithm property for hashmap. Default value true
+   */
+  public static final String ENABLE_XXHASH_DEFAULT = "true";
+
+  /**
+   * default charset to be used for reading and writing
+   */
+  public static final String DEFAULT_CHARSET = "UTF-8";
+
+  /**
+   * surrogate key that will be sent whenever in the dictionary chunks
+   * a valid surrogate key is not found for a given dictionary value
+   */
+  public static final int INVALID_SURROGATE_KEY = -1;
+
+  /**
+   * surrogate key for MEMBER_DEFAULT_VAL
+   */
+  public static final int MEMBER_DEFAULT_VAL_SURROGATE_KEY = 1;
+
+  public static final String INVALID_SEGMENT_ID = "-1";
+
+  /**
+   * Size of Major Compaction in MBs
+   */
+  public static final String MAJOR_COMPACTION_SIZE = "carbon.major.compaction.size";
+
+  /**
+   * By default size of major compaction in MBs.
+   */
+  public static final String DEFAULT_MAJOR_COMPACTION_SIZE = "1024";
+
+  /**
+   * This property is used to tell how many segments to be preserved from merging.
+   */
+  public static final java.lang.String PRESERVE_LATEST_SEGMENTS_NUMBER =
+      "carbon.numberof.preserve.segments";
+
+  /**
+   * Default number of latest segments preserved from compaction; 0 means none.
+   */
+  public static final String DEFAULT_PRESERVE_LATEST_SEGMENTS_NUMBER = "0";
+
+  /**
+   * This property will determine the loads of how many days can be compacted.
+   */
+  public static final java.lang.String DAYS_ALLOWED_TO_COMPACT = "carbon.allowed.compaction.days";
+
+  /**
+   * Default number of days; 0 means loads of any age may be compacted
+   */
+  public static final String DEFAULT_DAYS_ALLOWED_TO_COMPACT = "0";
+
+  /**
+   * space reserved for writing block meta data in carbon data file
+   */
+  public static final String CARBON_BLOCK_META_RESERVED_SPACE =
+      "carbon.block.meta.size.reserved.percentage";
+
+  /**
+   * default value for space reserved for writing block meta data in carbon data file
+   */
+  public static final String CARBON_BLOCK_META_RESERVED_SPACE_DEFAULT = "10";
+
+  /**
+   * property to enable min max during filter query
+   */
+  public static final String CARBON_QUERY_MIN_MAX_ENABLED = "carbon.enableMinMax";
+
+  /**
+   * default value to enable min or max during filter query execution
+   */
+  public static final String MIN_MAX_DEFAULT_VALUE = "true";
+
+  /**
+   * this variable is to enable/disable prefetch of data during merge sort while
+   * reading data from sort temp files
+   */
+  public static final String CARBON_MERGE_SORT_PREFETCH = "carbon.merge.sort.prefetch";
+  public static final String CARBON_MERGE_SORT_PREFETCH_DEFAULT = "true";
+
+  /**
+   *  default name of data base
+   */
+  public static final String DATABASE_DEFAULT_NAME = "default";
+
+  // tblproperties
+  public static final String COLUMN_GROUPS = "column_groups";
+  public static final String DICTIONARY_EXCLUDE = "dictionary_exclude";
+  public static final String DICTIONARY_INCLUDE = "dictionary_include";
+  public static final String PARTITIONCLASS = "partitionclass";
+  public static final String PARTITIONCOUNT = "partitioncount";
+  public static final String COLUMN_PROPERTIES = "columnproperties";
+
+  /**
+   * this variable is to enable/disable identify high cardinality during first data loading
+   */
+  public static final String HIGH_CARDINALITY_IDENTIFY_ENABLE =
+      "high.cardinality.identify.enable";
+  public static final String HIGH_CARDINALITY_IDENTIFY_ENABLE_DEFAULT = "true";
+
+  /**
+   * threshold of high cardinality
+   */
+  public static final String HIGH_CARDINALITY_THRESHOLD = "high.cardinality.threshold";
+  public static final String HIGH_CARDINALITY_THRESHOLD_DEFAULT = "1000000";
+  public static final int HIGH_CARDINALITY_THRESHOLD_MIN = 10000;
+
+  /**
+   * percentage of cardinality in row count
+   */
+  public static final String HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE =
+      "high.cardinality.row.count.percentage";
+  public static final String HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE_DEFAULT = "80";
+
+  /**
+   * 16 MB size
+   */
+  public static final long CARBON_16MB = 16 * 1024 * 1024;
+  /**
+   * 256 MB size
+   */
+  public static final long CARBON_256MB = 256 * 1024 * 1024;
+
+  /**
+   * Data type String.
+   */
+  public static final String DATATYPE_STRING = "STRING";
+
+  /**
+   * SEGMENT_COMPACTED is property to indicate whether seg is compacted or not.
+   */
+  public static final String SEGMENT_COMPACTED = "Compacted";
+
+  /**
+   * property for number of core to load the blocks in driver
+   */
+  public static final String NUMBER_OF_CORE_TO_LOAD_DRIVER_SEGMENT =
+      "no.of.cores.to.load.blocks.in.driver";
+  /**
+   * default number of cores
+   */
+  public static final int NUMBER_OF_CORE_TO_LOAD_DRIVER_SEGMENT_DEFAULT_VALUE = 10;
+
+  /**
+   * ZOOKEEPERLOCK TYPE
+   */
+  public static final String CARBON_LOCK_TYPE_ZOOKEEPER =
+      "ZOOKEEPERLOCK";
+
+  /**
+   * LOCALLOCK TYPE
+   */
+  public static final String CARBON_LOCK_TYPE_LOCAL =
+      "LOCALLOCK";
+
+  /**
+   * HDFSLOCK TYPE
+   */
+  public static final String CARBON_LOCK_TYPE_HDFS =
+      "HDFSLOCK";
+
+  /**
+   * Lock file in zoo keeper will be of this name.
+   */
+  public static final String ZOOKEEPER_LOCK = "zookeeperLock";
+
+  /**
+   * Invalid filter member log string
+   */
+  public static final String FILTER_INVALID_MEMBER = " Invalid Record(s) are present "
+                                                     + "while filter evaluation. ";
+
+  /**
+   * Number of unmerged segments to be merged.
+   */
+  public static final String COMPACTION_SEGMENT_LEVEL_THRESHOLD =
+      "carbon.compaction.level.threshold";
+
+  /**
+   * Default count for Number of segments to be merged in levels is 4,3
+   */
+  public static final String DEFAULT_SEGMENT_LEVEL_THRESHOLD = "4,3";
+
+  /**
+   * default location of the carbon metastore db
+   */
+  public static final String METASTORE_LOCATION_DEFAULT_VAL = "../carbon.metastore";
+
+  /**
+   * hive connection url
+   */
+  public static final String HIVE_CONNECTION_URL = "javax.jdo.option.ConnectionURL";
+
+  /**
+   * Record size in case of compaction.
+   */
+  public static final int COMPACTION_INMEMORY_RECORD_SIZE = 120000;
+
+  /**
+   * If level 2 compaction happens during minor compaction, the new compacted segment name ends with .2
+   */
+  public static String LEVEL2_COMPACTION_INDEX = ".2";
+
+  /**
+   * Indicates compaction
+   */
+  public static String COMPACTION_KEY_WORD = "COMPACTION";
+
+  /**
+   * hdfs temporary directory key
+   */
+  public static final String HDFS_TEMP_LOCATION = "hadoop.tmp.dir";
+
+  /**
+   * zookeeper url key
+   */
+  public static final String ZOOKEEPER_URL = "spark.deploy.zookeeper.url";
+
+  private CarbonCommonConstants() {
+  }
+}
+
+
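
Most of these constants come in key/default pairs that are read through
CarbonProperties; a sketch of the intended pattern, assuming the
getProperty(key, defaultValue) accessor used elsewhere in this commit:

    String blockletSize = CarbonProperties.getInstance()
        .getProperty(CarbonCommonConstants.BLOCKLET_SIZE,
            CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL);
    int size = Integer.parseInt(blockletSize);  // 120000 unless overridden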

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/constants/IgnoreDictionary.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/IgnoreDictionary.java b/core/src/main/java/org/apache/carbondata/core/constants/IgnoreDictionary.java
new file mode 100644
index 0000000..6a0105c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/constants/IgnoreDictionary.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.constants;
+
+/**
+ * This enum is used for determining the indexes of the
+ * dimension, ignore-dictionary and measure columns.
+ */
+public enum IgnoreDictionary {
+  /**
+   * POSITION WHERE DIMENSIONS ARE STORED IN THE OBJECT ARRAY.
+   */
+  DIMENSION_INDEX_IN_ROW(0),
+
+  /**
+   * POSITION WHERE BYTE[] (high cardinality) IS STORED IN OBJECT ARRAY.
+   */
+  BYTE_ARRAY_INDEX_IN_ROW(1),
+
+  /**
+   * POSITION WHERE MEASURES ARE STORED IN THE OBJECT ARRAY.
+   */
+  MEASURES_INDEX_IN_ROW(2);
+
+  private final int index;
+
+  IgnoreDictionary(int index) {
+    this.index = index;
+  }
+
+  public int getIndex() {
+    return this.index;
+  }
+
+}
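
A sketch of how these ordinals are used: a data-load row is an Object[] of
size CarbonCommonConstants.ARRAYSIZE and the enum names each slot (the row
contents below are made up):

    Object[] row = new Object[3];
    row[IgnoreDictionary.DIMENSION_INDEX_IN_ROW.getIndex()] = new int[] { 1, 2 };       // dictionary dimensions
    row[IgnoreDictionary.BYTE_ARRAY_INDEX_IN_ROW.getIndex()] = new byte[][] { { 42 } }; // no-dictionary dimensions
    row[IgnoreDictionary.MEASURES_INDEX_IN_ROW.getIndex()] = new Object[] { 1.5d };     // measures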

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/FileHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/FileHolder.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/FileHolder.java
new file mode 100644
index 0000000..9f8d8ed
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/FileHolder.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store;
+
+
+public interface FileHolder {
+  /**
+   * This method will be used to read the byte array from file based on offset
+   * and length(number of bytes) need to read
+   *
+   * @param filePath fully qualified file path
+   * @param offset   reading start position,
+   * @param length   number of bytes to be read
+   * @return read byte array
+   */
+  byte[] readByteArray(String filePath, long offset, int length);
+
+  /**
+   * This method will be used to read the byte array from file based on length(number of bytes)
+   *
+   * @param filePath fully qualified file path
+   * @param length   number of bytes to be read
+   * @return read byte array
+   */
+  byte[] readByteArray(String filePath, int length);
+
+  /**
+   * This method will be used to read an int from the file at the given
+   * position (offset); length will always be 4 because an int is 4 bytes
+   *
+   * @param filePath fully qualified file path
+   * @param offset   reading start position
+   * @return read int
+   */
+  int readInt(String filePath, long offset);
+
+  /**
+   * This method will be used to read a long from the file at the given
+   * position (offset); length will always be 8 because a long is 8 bytes
+   *
+   * @param filePath fully qualified file path
+   * @param offset   reading start position
+   * @return read long
+   */
+  long readLong(String filePath, long offset);
+
+  /**
+   * This method will be used to read an int from the beginning of the file;
+   * length will always be 4 because an int is 4 bytes
+   *
+   * @param filePath fully qualified file path
+   * @return read int
+   */
+  int readInt(String filePath);
+
+  /**
+   * This method will be used to read a double value from the file at the given
+   * position (offset); length will always be 8 because a double is 8 bytes
+   *
+   * @param filePath fully qualified file path
+   * @param offset   reading start position
+   * @return read value returned as a long
+   */
+  long readDouble(String filePath, long offset);
+
+  /**
+   * This method will be used to close all the streams currently present in the cache
+   */
+  void finish();
+}
\ No newline at end of file
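
A hypothetical read sequence against a FileHolder implementation; obtaining
one via FileFactory.getFileHolder is an assumption here, and the path and
file layout are made up:

    String path = "/tmp/store/part-0-0-0.carbondata";        // hypothetical file
    FileHolder holder = FileFactory.getFileHolder(FileFactory.getFileType(path));
    int length = holder.readInt(path, 0L);                   // 4 bytes at offset 0
    byte[] block = holder.readByteArray(path, 4L, length);   // 'length' bytes after it
    holder.finish();                                         // close cached streams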

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/MeasureDataWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/MeasureDataWrapper.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/MeasureDataWrapper.java
new file mode 100644
index 0000000..80a4374
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/MeasureDataWrapper.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store;
+
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+
+/**
+ * MeasureDataWrapper, interface.
+ */
+public interface MeasureDataWrapper {
+  CarbonReadDataHolder[] getValues();
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/NodeKeyStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/NodeKeyStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/NodeKeyStore.java
new file mode 100644
index 0000000..42c5071
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/NodeKeyStore.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store;
+
+public interface NodeKeyStore {
+  /**
+   * This method will be used to get the actual mdkey array present in the
+   * store
+   *
+   * @param fileHolder
+   * @return mdkey
+   */
+  byte[] getBackArray(FileHolder fileHolder);
+
+  /**
+   * This method will be used to insert mdkey to store
+   *
+   * @param index index of mdkey
+   * @param value mdkey
+   */
+  void put(int index, byte[] value);
+
+  /**
+   * This method will be used to get the writable key array.
+   * writable key array will hold below information:
+   * <size of key array><key array>
+   * total length will be 4 bytes for size + key array length
+   *
+   * @return writable array (compressed or normal)
+   */
+  byte[] getWritableKeyArray();
+
+  /**
+   * This method will be used to get the mdkey array based on index
+   *
+   * @param index      index in store
+   * @param fileHolder file holder will be used to read the file
+   * @return mdkey
+   */
+  byte[] get(int index, FileHolder fileHolder);
+
+  /**
+   * This method will clear the store and create the new empty store
+   */
+  void clear();
+
+}
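
The writable key array layout documented above, <size of key array><key array>,
can be illustrated with a plain java.nio.ByteBuffer (the key bytes are made up):

    byte[] keyArray = { 1, 2, 3 };
    ByteBuffer buffer = ByteBuffer.allocate(4 + keyArray.length);
    buffer.putInt(keyArray.length);    // 4-byte size prefix
    buffer.put(keyArray);              // followed by the key array itself
    byte[] writable = buffer.array();  // 7 bytes total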

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/NodeMeasureDataStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/NodeMeasureDataStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/NodeMeasureDataStore.java
new file mode 100644
index 0000000..f2fe9d2
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/NodeMeasureDataStore.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store;
+
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonWriteDataHolder;
+
+public interface NodeMeasureDataStore {
+  /**
+   * This method will be used to get the writable key array.
+   * writable measure data array will hold below information:
+   * <size of measure data array><measure data array>
+   * total length will be 4 bytes for size + measure data array length
+   *
+   * @return writable array (compressed or normal)
+   */
+  byte[][] getWritableMeasureDataArray(CarbonWriteDataHolder[] dataHolderArray);
+
+  MeasureDataWrapper getBackData(int[] cols, FileHolder fileHolder);
+
+  MeasureDataWrapper getBackData(int cols, FileHolder fileHolder);
+
+  short getLength();
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForInt.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForInt.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForInt.java
new file mode 100644
index 0000000..013d873
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForInt.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.columnar;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.util.ByteUtil;
+
+public class BlockIndexerStorageForInt implements IndexStorage<int[]> {
+  private boolean alreadySorted;
+
+  private int[] dataAfterComp;
+
+  private int[] indexMap;
+
+  private byte[][] keyBlock;
+
+  private int[] dataIndexMap;
+
+  private int totalSize;
+
+  public BlockIndexerStorageForInt(byte[][] keyBlock, boolean compressData, boolean isNoDictionary,
+      boolean isSortRequired) {
+    ColumnWithIntIndex[] columnWithIndexs = createColumnWithIndexArray(keyBlock, isNoDictionary);
+    if (isSortRequired) {
+      Arrays.sort(columnWithIndexs);
+    }
+    compressMyOwnWay(extractDataAndReturnIndexes(columnWithIndexs, keyBlock));
+    if (compressData) {
+      compressDataMyOwnWay(columnWithIndexs);
+    }
+  }
+
+  /**
+   * Create an object with each column array and respective index
+   *
+   * @return columns paired with their original row indexes
+   */
+  private ColumnWithIntIndex[] createColumnWithIndexArray(byte[][] keyBlock,
+      boolean isNoDictionary) {
+    ColumnWithIntIndex[] columnWithIndexs;
+    if (isNoDictionary) {
+      columnWithIndexs = new ColumnWithIntIndexForHighCard[keyBlock.length];
+      for (int i = 0; i < columnWithIndexs.length; i++) {
+        columnWithIndexs[i] = new ColumnWithIntIndexForHighCard(keyBlock[i], i);
+      }
+
+    } else {
+      columnWithIndexs = new ColumnWithIntIndex[keyBlock.length];
+      for (int i = 0; i < columnWithIndexs.length; i++) {
+        columnWithIndexs[i] = new ColumnWithIntIndex(keyBlock[i], i);
+      }
+    }
+
+    return columnWithIndexs;
+  }
+
+  private int[] extractDataAndReturnIndexes(ColumnWithIntIndex[] columnWithIndexs,
+      byte[][] keyBlock) {
+    int[] indexes = new int[columnWithIndexs.length];
+    for (int i = 0; i < indexes.length; i++) {
+      indexes[i] = columnWithIndexs[i].getIndex();
+      keyBlock[i] = columnWithIndexs[i].getColumn();
+    }
+    this.keyBlock = keyBlock;
+    return indexes;
+  }
+
+  /**
+   * Compresses runs of consecutive numbers. For example,
+   * [1,2,3,4,6,8,10,11,12,13] is translated to [1,4,6,8,10,13] and [0,4].
+   * The first array holds the start and end of each consecutive run along
+   * with the isolated values, and the second array keeps the positions in
+   * the first array where the runs begin. If there are no consecutive
+   * numbers, the same array is returned with an empty second array.
+   *
+   * @param indexes row indexes to compress
+   */
+  public void compressMyOwnWay(int[] indexes) {
+    List<Integer> list = new ArrayList<Integer>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+    List<Integer> map = new ArrayList<Integer>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+    int k = 0;
+    int i = 1;
+    for (; i < indexes.length; i++) {
+      if (indexes[i] - indexes[i - 1] == 1) {
+        k++;
+      } else {
+        if (k > 0) {
+          map.add((list.size()));
+          list.add(indexes[i - k - 1]);
+          list.add(indexes[i - 1]);
+        } else {
+          list.add(indexes[i - 1]);
+        }
+        k = 0;
+      }
+    }
+    if (k > 0) {
+      map.add((list.size()));
+      list.add(indexes[i - k - 1]);
+      list.add(indexes[i - 1]);
+    } else {
+      list.add(indexes[i - 1]);
+    }
+    dataAfterComp = convertToArray(list);
+    if (indexes.length == dataAfterComp.length) {
+      indexMap = new int[0];
+    } else {
+      indexMap = convertToArray(map);
+    }
+    if (dataAfterComp.length == 2 && indexMap.length == 1) {
+      alreadySorted = true;
+    }
+  }
+
+  private int[] convertToArray(List<Integer> list) {
+    int[] shortArray = new int[list.size()];
+    for (int i = 0; i < shortArray.length; i++) {
+      shortArray[i] = list.get(i);
+    }
+    return shortArray;
+  }
+
+  /**
+   * @return the alreadySorted
+   */
+  public boolean isAlreadySorted() {
+    return alreadySorted;
+  }
+
+  /**
+   * @return the dataAfterComp
+   */
+  public int[] getDataAfterComp() {
+    return dataAfterComp;
+  }
+
+  /**
+   * @return the indexMap
+   */
+  public int[] getIndexMap() {
+    return indexMap;
+  }
+
+  /**
+   * @return the keyBlock
+   */
+  public byte[][] getKeyBlock() {
+    return keyBlock;
+  }
+
+  private void compressDataMyOwnWay(ColumnWithIntIndex[] indexes) {
+    byte[] prvKey = indexes[0].getColumn();
+    List<ColumnWithIntIndex> list =
+        new ArrayList<ColumnWithIntIndex>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+    list.add(indexes[0]);
+    int counter = 1;
+    int start = 0;
+    List<Integer> map = new ArrayList<Integer>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+    for (int i = 1; i < indexes.length; i++) {
+      if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(prvKey, indexes[i].getColumn()) != 0) {
+        prvKey = indexes[i].getColumn();
+        list.add(indexes[i]);
+        map.add(start);
+        map.add(counter);
+        start += counter;
+        counter = 1;
+        continue;
+      }
+      counter++;
+    }
+    map.add(start);
+    map.add(counter);
+    this.keyBlock = convertToKeyArray(list);
+    if (indexes.length == keyBlock.length) {
+      dataIndexMap = new int[0];
+    } else {
+      dataIndexMap = convertToArray(map);
+    }
+  }
+
+  private byte[][] convertToKeyArray(List<ColumnWithIntIndex> list) {
+    byte[][] shortArray = new byte[list.size()][];
+    for (int i = 0; i < shortArray.length; i++) {
+      shortArray[i] = list.get(i).getColumn();
+      totalSize += shortArray[i].length;
+    }
+    return shortArray;
+  }
+
+  @Override public int[] getDataIndexMap() {
+    return dataIndexMap;
+  }
+
+  @Override public int getTotalSize() {
+    return totalSize;
+  }
+
+  @Override public byte[] getMin() {
+    return keyBlock[0];
+  }
+
+  @Override public byte[] getMax() {
+    return keyBlock[keyBlock.length - 1];
+  }
+}
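
A minimal sketch (not part of the patch) tracing the run-length index
compression performed by compressMyOwnWay() on the example from its
javadoc. It assumes java.util.Arrays is imported and uses a one-entry
dummy key block only to satisfy the constructor:

    byte[][] dummy = { { 0 } };
    BlockIndexerStorageForInt storage =
        new BlockIndexerStorageForInt(dummy, false, false, false);
    storage.compressMyOwnWay(new int[] { 1, 2, 3, 4, 6, 8, 10, 11, 12, 13 });
    // Runs 1..4 and 10..13 collapse to (start, end) pairs:
    System.out.println(Arrays.toString(storage.getDataAfterComp())); // [1, 4, 6, 8, 10, 13]
    // Positions in the compressed array where those pairs begin:
    System.out.println(Arrays.toString(storage.getIndexMap()));      // [0, 4]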

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForNoInvertedIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForNoInvertedIndex.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForNoInvertedIndex.java
new file mode 100644
index 0000000..c7d43cf
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/BlockIndexerStorageForNoInvertedIndex.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.columnar;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.util.ByteUtil;
+
+public class BlockIndexerStorageForNoInvertedIndex implements IndexStorage<int[]> {
+  private byte[][] keyBlock;
+  private byte[][] sortedBlock;
+  private int totalSize;
+  private int[] dataIndexMap;
+
+  public BlockIndexerStorageForNoInvertedIndex(byte[][] keyBlockInput, boolean compressData,
+      boolean isNoDictionary) {
+    // without inverted index, but the data may still be run-length encoded (RLE)
+    if (compressData) {
+      // with RLE
+      byte[] prvKey = keyBlockInput[0];
+      List<byte[]> list = new ArrayList<byte[]>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+      list.add(keyBlockInput[0]);
+      int counter = 1;
+      int start = 0;
+      List<Integer> map = new ArrayList<Integer>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+      int length = keyBlockInput.length;
+      for(int i = 1; i < length; i++) {
+        if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(prvKey, keyBlockInput[i]) != 0) {
+          prvKey = keyBlockInput[i];
+          list.add(keyBlockInput[i]);
+          map.add(start);
+          map.add(counter);
+          start += counter;
+          counter = 1;
+          continue;
+        }
+        counter++;
+      }
+      map.add(start);
+      map.add(counter);
+      this.keyBlock = convertToKeyArray(list);
+      if (keyBlockInput.length == this.keyBlock.length) {
+        dataIndexMap = new int[0];
+      } else {
+        dataIndexMap = convertToArray(map);
+      }
+    } else {
+      this.keyBlock = keyBlockInput;
+      dataIndexMap = new int[0];
+    }
+
+    this.sortedBlock = new byte[keyBlock.length][];
+    System.arraycopy(keyBlock, 0, sortedBlock, 0, keyBlock.length);
+    if (isNoDictionary) {
+      Arrays.sort(sortedBlock, new Comparator<byte[]>() {
+        @Override
+        public int compare(byte[] col1, byte[] col2) {
+          return ByteUtil.UnsafeComparer.INSTANCE
+              .compareTo(col1, 2, col1.length - 2, col2, 2, col2.length - 2);
+        }
+      });
+    } else {
+      Arrays.sort(sortedBlock, new Comparator<byte[]>() {
+        @Override
+        public int compare(byte[] col1, byte[] col2) {
+          return ByteUtil.UnsafeComparer.INSTANCE.compareTo(col1, col2);
+        }
+      });
+    }
+
+  }
+
+  private int[] convertToArray(List<Integer> list) {
+    int[] shortArray = new int[list.size()];
+    for(int i = 0; i < shortArray.length; i++) {
+      shortArray[i] = list.get(i);
+    }
+    return shortArray;
+  }
+
+  private byte[][] convertToKeyArray(List<byte[]> list) {
+    byte[][] shortArray = new byte[list.size()][];
+    for (int i = 0; i < shortArray.length; i++) {
+      shortArray[i] = list.get(i);
+      totalSize += shortArray[i].length;
+    }
+    return shortArray;
+  }
+
+  @Override
+  public int[] getDataIndexMap() {
+    return dataIndexMap;
+  }
+
+  @Override
+  public int getTotalSize() {
+    return totalSize;
+  }
+
+  @Override
+  public boolean isAlreadySorted() {
+    return true;
+  }
+
+  /**
+   * Not used for this implementation; no inverted index is kept.
+   * @return empty array
+   */
+  @Override
+  public int[] getDataAfterComp() {
+    return new int[0];
+  }
+
+  /**
+   * Not used for this implementation; no inverted index is kept.
+   * @return empty array
+   */
+  @Override
+  public int[] getIndexMap() {
+    return new int[0];
+  }
+
+  /**
+   * @return the keyBlock
+   */
+  public byte[][] getKeyBlock() {
+    return keyBlock;
+  }
+
+  @Override public byte[] getMin() {
+    return sortedBlock[0];
+  }
+
+  @Override public byte[] getMax() {
+    return sortedBlock[sortedBlock.length - 1];
+  }
+
+}
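
A short illustrative run (not part of the patch) of the RLE path in
BlockIndexerStorageForNoInvertedIndex, using single-byte keys and
assuming java.util.Arrays is imported:

    byte[][] rows = { { 97 }, { 97 }, { 97 }, { 98 }, { 99 }, { 99 } }; // "a","a","a","b","c","c"
    BlockIndexerStorageForNoInvertedIndex storage =
        new BlockIndexerStorageForNoInvertedIndex(rows, true, false);
    // Only distinct keys survive; dataIndexMap holds (start, count) per key:
    System.out.println(storage.getKeyBlock().length);               // 3
    System.out.println(Arrays.toString(storage.getDataIndexMap())); // [0, 3, 3, 1, 4, 2]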

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnGroupModel.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnGroupModel.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnGroupModel.java
new file mode 100644
index 0000000..cf9ba40
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnGroupModel.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.datastorage.store.columnar;
+
+public class ColumnGroupModel {
+
+  /**
+   * number of columns in each columnar block
+   */
+  private int[] columnSplit;
+
+  /**
+   * total number of columns
+   */
+  private int noOfColumnsStore;
+
+  /**
+   * whether given index is columnar or not
+   * true: columnar
+   * false: row block
+   */
+  private boolean[] columnarStore;
+
+  /**
+   * column groups
+   * e.g
+   * {{0,1,2},3,4,{5,6}}
+   */
+  private int[][] columnGroups;
+
+  /**
+   * @return the column split sizes
+   */
+  public int[] getColumnSplit() {
+    return columnSplit;
+  }
+
+  /**
+   * set columnSplit
+   *
+   * @param split
+   */
+  public void setColumnSplit(int[] split) {
+    this.columnSplit = split;
+  }
+
+  /**
+   * @return no of columnar block
+   */
+  public int getNoOfColumnStore() {
+    return this.noOfColumnsStore;
+  }
+
+  /**
+   * set no of columnar block
+   *
+   * @param noOfColumnsStore
+   */
+  public void setNoOfColumnStore(int noOfColumnsStore) {
+    this.noOfColumnsStore = noOfColumnsStore;
+  }
+
+  /**
+   * flags identifying, per block, whether it is a single-column (columnar)
+   * block or a row block
+   *
+   * @param columnarStore
+   */
+  public void setColumnarStore(boolean[] columnarStore) {
+    this.columnarStore = columnarStore;
+  }
+
+  /**
+   * set column groups
+   *
+   * @param columnGroups
+   */
+  public void setColumnGroup(int[][] columnGroups) {
+    this.columnGroups = columnGroups;
+  }
+
+  /**
+   * check if given column group is columnar
+   *
+   * @param colGroup
+   * @return true if given block is columnar
+   */
+  public boolean isColumnar(int colGroup) {
+    return columnarStore[colGroup];
+  }
+
+  /**
+   * @return the column groups
+   */
+  public int[][] getColumnGroup() {
+    return this.columnGroups;
+  }
+
+}
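
An illustrative configuration matching the javadoc example
{{0,1,2},3,4,{5,6}}; the split and flag values below are assumptions
for the demo, not values produced elsewhere in this patch:

    ColumnGroupModel model = new ColumnGroupModel();
    model.setColumnGroup(new int[][] { { 0, 1, 2 }, { 3 }, { 4 }, { 5, 6 } });
    model.setColumnSplit(new int[] { 3, 1, 1, 2 });   // columns per block
    model.setNoOfColumnStore(4);                      // four column stores
    model.setColumnarStore(new boolean[] { false, true, true, false });
    System.out.println(model.isColumnar(1)); // true: block 1 holds one column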

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnWithIntIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnWithIntIndex.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnWithIntIndex.java
new file mode 100644
index 0000000..36606a5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnWithIntIndex.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.columnar;
+
+import java.util.Arrays;
+
+import org.apache.carbondata.core.util.ByteUtil;
+
+public class ColumnWithIntIndex implements Comparable<ColumnWithIntIndex> {
+  protected byte[] column;
+
+  private int index;
+
+  public ColumnWithIntIndex(byte[] column, int index) {
+    this.column = column;
+    this.index = index;
+  }
+
+  public ColumnWithIntIndex() {
+  }
+
+  /**
+   * @return the column
+   */
+  public byte[] getColumn() {
+    return column;
+  }
+
+  /**
+   * @param column the column to set
+   */
+  public void setColumn(byte[] column) {
+    this.column = column;
+  }
+
+  /**
+   * @return the index
+   */
+  public int getIndex() {
+    return index;
+  }
+
+  /**
+   * @param index the index to set
+   */
+  public void setIndex(int index) {
+    this.index = index;
+  }
+
+  @Override public int compareTo(ColumnWithIntIndex o) {
+    return ByteUtil.UnsafeComparer.INSTANCE.compareTo(column, o.column);
+  }
+
+  @Override public boolean equals(Object obj) {
+    if(obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+    ColumnWithIntIndex o = (ColumnWithIntIndex)obj;
+    return Arrays.equals(column, o.column) && index == o.index;
+  }
+
+  @Override public int hashCode() {
+    return Arrays.hashCode(column) + index;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnWithIntIndexForHighCard.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnWithIntIndexForHighCard.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnWithIntIndexForHighCard.java
new file mode 100644
index 0000000..61a1165
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnWithIntIndexForHighCard.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.columnar;
+
+import java.util.Arrays;
+
+import org.apache.carbondata.core.util.ByteUtil.UnsafeComparer;
+
+public class ColumnWithIntIndexForHighCard extends ColumnWithIntIndex
+    implements Comparable<ColumnWithIntIndex> {
+
+  public ColumnWithIntIndexForHighCard(byte[] column, int index) {
+    super(column, index);
+  }
+
+  @Override public int compareTo(ColumnWithIntIndex o) {
+    return UnsafeComparer.INSTANCE
+        .compareTo(column, 2, column.length - 2, o.column, 2, o.column.length - 2);
+  }
+
+  @Override public boolean equals(Object obj) {
+    if(obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+    ColumnWithIntIndexForHighCard o = (ColumnWithIntIndexForHighCard)obj;
+    return Arrays.equals(column, o.column) && getIndex() == o.getIndex();
+  }
+
+  @Override public int hashCode() {
+    return Arrays.hashCode(column) + getIndex();
+  }
+}
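
The high-cardinality comparator skips the first two bytes of each value;
a sketch assuming those bytes are the short length prefix used for
no-dictionary keys (see CompressedColumnarKeyStoreUtil below) and that
java.nio.ByteBuffer and java.util.Arrays are imported:

    byte[] ab = ByteBuffer.allocate(4).putShort((short) 2).put("ab".getBytes()).array();
    byte[] b = ByteBuffer.allocate(3).putShort((short) 1).put("b".getBytes()).array();
    ColumnWithIntIndex[] cols = {
        new ColumnWithIntIndexForHighCard(b, 0),
        new ColumnWithIntIndexForHighCard(ab, 1) };
    Arrays.sort(cols);
    // The prefixes are excluded from the comparison, so "ab" sorts before "b":
    System.out.println(cols[0].getIndex()); // 1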

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStore.java
new file mode 100644
index 0000000..9e12847
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStore.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.columnar;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+public interface ColumnarKeyStore {
+  /**
+   * Reads and uncompresses the actual mdkey arrays present in the carbon
+   * store for the given block indexes.
+   *
+   * @param fileHolder file reader
+   * @param noDictionaryValKeyIndexes block indexes of NO_DICTIONARY columns,
+   *        so their direct surrogate data can be processed separately
+   * @return mdkey holders, one per requested block
+   */
+  ColumnarKeyStoreDataHolder[] getUnCompressedKeyArray(FileHolder fileHolder, int[] blockIndex,
+      boolean[] needCompressedData, int[] noDictionaryValKeyIndexes);
+
+  /**
+   * Reads and uncompresses the actual mdkey array present in the carbon
+   * store for a single block index.
+   *
+   * @param fileHolder file reader
+   * @return mdkey holder for the requested block
+   */
+  ColumnarKeyStoreDataHolder getUnCompressedKeyArray(FileHolder fileHolder, int blockIndex,
+      boolean needCompressedData, int[] noDictionaryValKeyIndexes);
+
+}
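
A hedged usage sketch: reading two dimension blocks through the
interface, with block 2 flagged as a no-dictionary column. The store and
fileHolder instances are assumed to be initialised elsewhere:

    int[] blockIndexes = { 0, 2 };
    boolean[] keepCompressed = { false, false };
    int[] noDictionaryIndexes = { 2 };
    ColumnarKeyStoreDataHolder[] holders = store
        .getUnCompressedKeyArray(fileHolder, blockIndexes, keepCompressed, noDictionaryIndexes);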

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreDataHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreDataHolder.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreDataHolder.java
new file mode 100644
index 0000000..b66d957
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreDataHolder.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.columnar;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+public class ColumnarKeyStoreDataHolder {
+  private byte[] keyblockData;
+  private List<byte[]> noDictionaryValBasedKeyBlockData;
+  private ColumnarKeyStoreMetadata columnarKeyStoreMetadata;
+
+  public ColumnarKeyStoreDataHolder(final byte[] keyblockData,
+      final ColumnarKeyStoreMetadata columnarKeyStoreMetadata) {
+    this.keyblockData = keyblockData;
+    this.columnarKeyStoreMetadata = columnarKeyStoreMetadata;
+  }
+
+  // Constructor variant holding the no-dictionary value based key block data
+  public ColumnarKeyStoreDataHolder(final List<byte[]> noDictionaryValBasedKeyBlockData,
+      final ColumnarKeyStoreMetadata columnarKeyStoreMetadata) {
+    this.noDictionaryValBasedKeyBlockData = noDictionaryValBasedKeyBlockData;
+    this.columnarKeyStoreMetadata = columnarKeyStoreMetadata;
+  }
+
+  public byte[] getKeyBlockData() {
+    return keyblockData;
+  }
+
+  /**
+   * @return the columnarKeyStoreMetadata
+   */
+  public ColumnarKeyStoreMetadata getColumnarKeyStoreMetadata() {
+    return columnarKeyStoreMetadata;
+  }
+
+  public void unCompress() {
+    if (columnarKeyStoreMetadata.isUnCompressed()) {
+      return;
+    }
+    this.keyblockData = UnBlockIndexer
+        .uncompressData(keyblockData, columnarKeyStoreMetadata.getDataIndex(),
+            columnarKeyStoreMetadata.getEachRowSize());
+    columnarKeyStoreMetadata.setUnCompressed(true);
+  }
+
+  public int getSurrogateKey(int columnIndex) {
+    byte[] actual = new byte[4];
+    int startIndex;
+    if (null != columnarKeyStoreMetadata.getColumnReverseIndex()) {
+      startIndex =
+          columnarKeyStoreMetadata.getColumnReverseIndex()[columnIndex] * columnarKeyStoreMetadata
+              .getEachRowSize();
+    } else {
+      startIndex = columnIndex * columnarKeyStoreMetadata.getEachRowSize();
+    }
+    int destPos = 4 - columnarKeyStoreMetadata.getEachRowSize();
+    System.arraycopy(keyblockData, startIndex, actual, destPos,
+        columnarKeyStoreMetadata.getEachRowSize());
+    return ByteBuffer.wrap(actual).getInt();
+  }
+
+  /**
+   * get the byte[] for high cardinality column block
+   *
+   * @return List<byte[]>.
+   */
+  public List<byte[]> getNoDictionaryValBasedKeyBlockData() {
+    return noDictionaryValBasedKeyBlockData;
+  }
+
+  /**
+   * set the byte[] for high cardinality column block
+   *
+   * @param noDictionaryValBasedKeyBlockData
+   */
+  public void setNoDictionaryValBasedKeyBlockData(List<byte[]> noDictionaryValBasedKeyBlockData) {
+    this.noDictionaryValBasedKeyBlockData = noDictionaryValBasedKeyBlockData;
+  }
+}
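
A worked trace of getSurrogateKey() under the assumption of a two-byte
row size and no reverse index; the key bytes are right-aligned into a
four-byte buffer before being read back as an int:

    byte[] row = { 0x01, 0x2C };   // one row's mdkey, eachRowSize = 2
    byte[] actual = new byte[4];
    System.arraycopy(row, 0, actual, 4 - row.length, row.length); // [0, 0, 1, 44]
    int surrogate = java.nio.ByteBuffer.wrap(actual).getInt();    // 300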



http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataInMemoryStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataInMemoryStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataInMemoryStore.java
new file mode 100644
index 0000000..1effb25
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataInMemoryStore.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.data.uncompressed;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.MeasureDataWrapper;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.datastorage.store.impl.CompressedDataMeasureDataWrapper;
+
+/**
+ * In-memory store for uncompressed double array measure data.
+ */
+public class DoubleArrayDataInMemoryStore extends AbstractDoubleArrayDataStore {
+
+  /**
+   * DoubleArrayDataInMemoryStore.
+   *
+   * @param compressionModel
+   * @param measuresOffsetsArray
+   * @param measuresLengthArray
+   * @param fileName
+   * @param fileHolder
+   */
+  public DoubleArrayDataInMemoryStore(ValueCompressionModel compressionModel,
+      long[] measuresOffsetsArray, int[] measuresLengthArray, String fileName,
+      FileHolder fileHolder) {
+    super(compressionModel);
+    if (null != compressionModel) {
+      UnCompressValue[] unCompValues = compressionModel.getUnCompressValues();
+      if (null != unCompValues) {
+        for (int i = 0; i < measuresLengthArray.length; i++) {
+
+          values[i] = unCompValues[i].getNew();
+          values[i].setValueInBytes(
+              fileHolder.readByteArray(fileName, measuresOffsetsArray[i], measuresLengthArray[i]));
+        }
+      }
+    }
+  }
+
+  /**
+   * DoubleArrayDataInMemoryStore.
+   *
+   * @param compressionModel
+   */
+  public DoubleArrayDataInMemoryStore(ValueCompressionModel compressionModel) {
+    super(compressionModel);
+  }
+
+  @Override public MeasureDataWrapper getBackData(int[] cols, FileHolder fileHolder) {
+    if (null == compressionModel) {
+      return null;
+    }
+    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[values.length];
+    if (null == cols) {
+      for (int i = 0; i < vals.length; i++) {
+        vals[i] = values[i]
+            .getValues(compressionModel.getDecimal()[i], compressionModel.getMaxValue()[i]);
+      }
+    } else {
+      for (int i = 0; i < cols.length; i++) {
+        vals[cols[i]] = values[cols[i]].getValues(compressionModel.getDecimal()[cols[i]],
+            compressionModel.getMaxValue()[cols[i]]);
+      }
+    }
+    return new CompressedDataMeasureDataWrapper(vals);
+  }
+
+  @Override public MeasureDataWrapper getBackData(int cols, FileHolder fileHolder) {
+    if (null == compressionModel) {
+      return null;
+    }
+    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[values.length];
+
+    vals[cols] = values[cols]
+        .getValues(compressionModel.getDecimal()[cols], compressionModel.getMaxValue()[cols]);
+    return new CompressedDataMeasureDataWrapper(vals);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/AbstractColumnarKeyStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/AbstractColumnarKeyStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/AbstractColumnarKeyStore.java
new file mode 100644
index 0000000..51c5fb8
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/AbstractColumnarKeyStore.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.key.columnar;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStore;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreInfo;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+
+public abstract class AbstractColumnarKeyStore implements ColumnarKeyStore {
+
+  /**
+   * compressor will be used to compress the data
+   */
+  protected static final Compressor<byte[]> COMPRESSOR =
+      SnappyCompression.SnappyByteCompression.INSTANCE;
+
+  protected ColumnarKeyStoreInfo columnarStoreInfo;
+
+  protected byte[][] columnarKeyBlockDataIndex;
+
+  protected byte[][] columnarKeyBlockData;
+
+  protected Map<Integer, Integer> mapOfColumnIndexAndColumnBlockIndex;
+
+  protected Map<Integer, Integer> mapOfAggDataIndex;
+
+  protected byte[][] columnarUniqueblockKeyBlockIndex;
+
+  public AbstractColumnarKeyStore(ColumnarKeyStoreInfo columnarStoreInfo, boolean isInMemory,
+      FileHolder fileHolder) {
+    this.columnarStoreInfo = columnarStoreInfo;
+    this.mapOfColumnIndexAndColumnBlockIndex =
+        new HashMap<Integer, Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    this.mapOfAggDataIndex =
+        new HashMap<Integer, Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    int index = 0;
+    for (int i = 0; i < this.columnarStoreInfo.getIsSorted().length; i++) {
+      if (!this.columnarStoreInfo.getIsSorted()[i]) {
+        this.mapOfColumnIndexAndColumnBlockIndex.put(i, index++);
+      }
+    }
+    index = 0;
+    for (int i = 0; i < this.columnarStoreInfo.getAggKeyBlock().length; i++) {
+      if (this.columnarStoreInfo.getAggKeyBlock()[i]) {
+        mapOfAggDataIndex.put(i, index++);
+      }
+    }
+    if (isInMemory) {
+      this.columnarKeyBlockData = new byte[this.columnarStoreInfo.getIsSorted().length][];
+      this.columnarKeyBlockDataIndex = new byte[this.mapOfColumnIndexAndColumnBlockIndex.size()][];
+      this.columnarUniqueblockKeyBlockIndex = new byte[this.mapOfAggDataIndex.size()][];
+      for (int i = 0; i < columnarStoreInfo.getSizeOfEachBlock().length; i++) {
+        columnarKeyBlockData[i] = fileHolder.readByteArray(columnarStoreInfo.getFilePath(),
+            columnarStoreInfo.getKeyBlockOffsets()[i], columnarStoreInfo.getKeyBlockLengths()[i]);
+
+        if (!this.columnarStoreInfo.getIsSorted()[i]) {
+          this.columnarKeyBlockDataIndex[mapOfColumnIndexAndColumnBlockIndex.get(i)] = fileHolder
+              .readByteArray(columnarStoreInfo.getFilePath(),
+                  columnarStoreInfo.getKeyBlockIndexOffsets()[mapOfColumnIndexAndColumnBlockIndex
+                      .get(i)],
+                  columnarStoreInfo.getKeyBlockIndexLength()[mapOfColumnIndexAndColumnBlockIndex
+                      .get(i)]);
+        }
+
+        if (this.columnarStoreInfo.getAggKeyBlock()[i]) {
+          this.columnarUniqueblockKeyBlockIndex[mapOfAggDataIndex.get(i)] = fileHolder
+              .readByteArray(columnarStoreInfo.getFilePath(),
+                  columnarStoreInfo.getDataIndexMapOffsets()[mapOfAggDataIndex.get(i)],
+                  columnarStoreInfo.getDataIndexMapLength()[mapOfAggDataIndex.get(i)]);
+        }
+      }
+    }
+  }
+
+  protected int[] getColumnIndexForNonFilter(int[] columnIndex) {
+    int[] columnIndexTemp = new int[columnIndex.length];
+
+    for (int i = 0; i < columnIndex.length; i++) {
+      columnIndexTemp[columnIndex[i]] = i;
+    }
+    return columnIndexTemp;
+  }
+}
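
A standalone trace (illustrative values only) of the two lookup maps the
constructor builds from the block metadata: unsorted blocks and
aggregated (RLE) blocks each get consecutive slots in their own
offset/length arrays of ColumnarKeyStoreInfo:

    boolean[] isSorted = { true, false, false };
    boolean[] aggKeyBlock = { true, false, true };
    java.util.Map<Integer, Integer> unsortedIdx = new java.util.HashMap<>();
    java.util.Map<Integer, Integer> aggIdx = new java.util.HashMap<>();
    int index = 0;
    for (int i = 0; i < isSorted.length; i++) {
      if (!isSorted[i]) {
        unsortedIdx.put(i, index++);
      }
    }
    index = 0;
    for (int i = 0; i < aggKeyBlock.length; i++) {
      if (aggKeyBlock[i]) {
        aggIdx.put(i, index++);
      }
    }
    System.out.println(unsortedIdx); // {1=0, 2=1}
    System.out.println(aggIdx);      // {0=0, 2=1}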

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarFileKeyStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarFileKeyStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarFileKeyStore.java
new file mode 100644
index 0000000..94d4b8f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarFileKeyStore.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.key.columnar.compressed;
+
+import java.util.List;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreDataHolder;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreInfo;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreMetadata;
+import org.apache.carbondata.core.datastorage.store.columnar.UnBlockIndexer;
+import org.apache.carbondata.core.datastorage.store.impl.key.columnar.AbstractColumnarKeyStore;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+public class CompressedColumnarFileKeyStore extends AbstractColumnarKeyStore {
+
+  public CompressedColumnarFileKeyStore(ColumnarKeyStoreInfo columnarStoreInfo) {
+    super(columnarStoreInfo, false, null);
+  }
+
+  @Override public ColumnarKeyStoreDataHolder[] getUnCompressedKeyArray(FileHolder fileHolder,
+      int[] blockIndex, boolean[] needCompressedData, int[] noDictionaryColIndexes) {
+    ColumnarKeyStoreDataHolder[] columnarKeyStoreDataHolders =
+        new ColumnarKeyStoreDataHolder[blockIndex.length];
+
+    for (int i = 0; i < columnarKeyStoreDataHolders.length; i++) {
+      byte[] columnarKeyBlockData = null;
+      int[] columnKeyBlockIndex = null;
+      int[] columnKeyBlockReverseIndexes = null;
+      ColumnarKeyStoreMetadata columnarKeyStoreMetadata = null;
+      int[] dataIndex = null;
+      boolean isUnCompressed = true;
+      columnarKeyBlockData = COMPRESSOR.unCompress(fileHolder
+          .readByteArray(columnarStoreInfo.getFilePath(),
+              columnarStoreInfo.getKeyBlockOffsets()[blockIndex[i]],
+              columnarStoreInfo.getKeyBlockLengths()[blockIndex[i]]));
+      boolean isNoDictionaryBlock =
+          CompressedColumnarKeyStoreUtil.isNoDictionaryBlock(noDictionaryColIndexes, blockIndex[i]);
+      if (!isNoDictionaryBlock && this.columnarStoreInfo.getAggKeyBlock()[blockIndex[i]]) {
+        dataIndex = columnarStoreInfo.getNumberCompressor().unCompress(fileHolder
+            .readByteArray(columnarStoreInfo.getFilePath(),
+                columnarStoreInfo.getDataIndexMapOffsets()[mapOfAggDataIndex.get(blockIndex[i])],
+                columnarStoreInfo.getDataIndexMapLength()[mapOfAggDataIndex.get(blockIndex[i])]));
+        if (!needCompressedData[i]) {
+          columnarKeyBlockData = UnBlockIndexer.uncompressData(columnarKeyBlockData, dataIndex,
+              columnarStoreInfo.getSizeOfEachBlock()[blockIndex[i]]);
+          dataIndex = null;
+        } else {
+          isUnCompressed = false;
+        }
+      }
+      if (!columnarStoreInfo.getIsSorted()[blockIndex[i]]) {
+        columnKeyBlockIndex = CarbonUtil
+            .getUnCompressColumnIndex(columnarStoreInfo.getKeyBlockIndexLength()[blockIndex[i]],
+                fileHolder.readByteArray(columnarStoreInfo.getFilePath(),
+                    columnarStoreInfo.getKeyBlockIndexOffsets()[blockIndex[i]],
+                    columnarStoreInfo.getKeyBlockIndexLength()[blockIndex[i]]),
+                columnarStoreInfo.getNumberCompressor());
+        columnKeyBlockReverseIndexes = getColumnIndexForNonFilter(columnKeyBlockIndex);
+      }
+      // Since it is a high cardinality dimension, the direct surrogates are added to
+      // columnarKeyStoreMetadata so they can later be wrapped in a ByteArrayWrapper instance.
+      if (isNoDictionaryBlock) {
+        columnarKeyStoreMetadata = new ColumnarKeyStoreMetadata(0);
+        columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
+        columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndexes);
+        columnarKeyStoreMetadata.setNoDictionaryValColumn(true);
+        columnarKeyStoreMetadata.setUnCompressed(true);
+        columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex[i]]);
+        // The direct surrogate data is read from a byte array that holds both the
+        // length prefix and the surrogate bytes.
+        List<byte[]> noDictionaryValBasedKeyBlockData = CompressedColumnarKeyStoreUtil
+            .readColumnarKeyBlockDataForNoDictionaryCols(columnarKeyBlockData);
+        columnarKeyStoreDataHolders[i] =
+            new ColumnarKeyStoreDataHolder(noDictionaryValBasedKeyBlockData,
+                columnarKeyStoreMetadata);
+      } else {
+        columnarKeyStoreMetadata =
+            new ColumnarKeyStoreMetadata(columnarStoreInfo.getSizeOfEachBlock()[blockIndex[i]]);
+        columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
+        columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex[i]]);
+        columnarKeyStoreMetadata.setDataIndex(dataIndex);
+        columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndexes);
+        columnarKeyStoreMetadata.setUnCompressed(isUnCompressed);
+        columnarKeyStoreDataHolders[i] =
+            new ColumnarKeyStoreDataHolder(columnarKeyBlockData, columnarKeyStoreMetadata);
+      }
+    }
+    return columnarKeyStoreDataHolders;
+  }
+
+  @Override
+  public ColumnarKeyStoreDataHolder getUnCompressedKeyArray(FileHolder fileHolder, int blockIndex,
+      boolean needCompressedData, int[] noDictionaryColIndexes) {
+    byte[] columnarKeyBlockData = null;
+    int[] columnKeyBlockIndex = null;
+    int[] columnKeyBlockReverseIndex = null;
+    ColumnarKeyStoreMetadata columnarKeyStoreMetadata = null;
+    int[] dataIndex = null;
+    boolean isUnCompressed = true;
+    columnarKeyBlockData = COMPRESSOR.unCompress(fileHolder
+        .readByteArray(columnarStoreInfo.getFilePath(),
+            columnarStoreInfo.getKeyBlockOffsets()[blockIndex],
+            columnarStoreInfo.getKeyBlockLengths()[blockIndex]));
+    boolean isNoDictionaryBlock =
+        CompressedColumnarKeyStoreUtil.isNoDictionaryBlock(noDictionaryColIndexes, blockIndex);
+    if (!isNoDictionaryBlock && this.columnarStoreInfo.getAggKeyBlock()[blockIndex]) {
+      dataIndex = columnarStoreInfo.getNumberCompressor().unCompress(fileHolder
+          .readByteArray(columnarStoreInfo.getFilePath(),
+              columnarStoreInfo.getDataIndexMapOffsets()[mapOfAggDataIndex.get(blockIndex)],
+              columnarStoreInfo.getDataIndexMapLength()[mapOfAggDataIndex.get(blockIndex)]));
+      if (!needCompressedData) {
+        columnarKeyBlockData = UnBlockIndexer.uncompressData(columnarKeyBlockData, dataIndex,
+            columnarStoreInfo.getSizeOfEachBlock()[blockIndex]);
+        dataIndex = null;
+      } else {
+        isUnCompressed = false;
+      }
+    }
+    if (!columnarStoreInfo.getIsSorted()[blockIndex]) {
+      columnKeyBlockIndex = CarbonUtil
+          .getUnCompressColumnIndex(columnarStoreInfo.getKeyBlockIndexLength()[blockIndex],
+              fileHolder.readByteArray(columnarStoreInfo.getFilePath(),
+                  columnarStoreInfo.getKeyBlockIndexOffsets()[blockIndex],
+                  columnarStoreInfo.getKeyBlockIndexLength()[blockIndex]),
+              columnarStoreInfo.getNumberCompressor());
+      columnKeyBlockReverseIndex = getColumnIndexForNonFilter(columnKeyBlockIndex);
+    }
+    // High cardinality dimension: the util method builds both the metadata
+    // and the data holder used by filter queries.
+    if (isNoDictionaryBlock) {
+      return CompressedColumnarKeyStoreUtil
+          .createColumnarKeyStoreMetadataForHCDims(blockIndex, columnarKeyBlockData,
+              columnKeyBlockIndex, columnKeyBlockReverseIndex, columnarStoreInfo);
+    }
+    columnarKeyStoreMetadata =
+        new ColumnarKeyStoreMetadata(columnarStoreInfo.getSizeOfEachBlock()[blockIndex]);
+    columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
+    columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex]);
+    columnarKeyStoreMetadata.setDataIndex(dataIndex);
+    columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndex);
+    columnarKeyStoreMetadata.setUnCompressed(isUnCompressed);
+
+    ColumnarKeyStoreDataHolder columnarKeyStoreDataHolders =
+        new ColumnarKeyStoreDataHolder(columnarKeyBlockData, columnarKeyStoreMetadata);
+    return columnarKeyStoreDataHolders;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarInMemoryStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarInMemoryStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarInMemoryStore.java
new file mode 100644
index 0000000..5d3d4b5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarInMemoryStore.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.key.columnar.compressed;
+
+import java.util.List;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreDataHolder;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreInfo;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreMetadata;
+import org.apache.carbondata.core.datastorage.store.columnar.UnBlockIndexer;
+import org.apache.carbondata.core.datastorage.store.impl.key.columnar.AbstractColumnarKeyStore;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+public class CompressedColumnarInMemoryStore extends AbstractColumnarKeyStore {
+
+  public CompressedColumnarInMemoryStore(ColumnarKeyStoreInfo columnarStoreInfo,
+      FileHolder fileHolder) {
+    super(columnarStoreInfo, true, fileHolder);
+  }
+
+  @Override public ColumnarKeyStoreDataHolder[] getUnCompressedKeyArray(FileHolder fileHolder,
+      int[] blockIndex, boolean[] needCompressedData, int[] noDictionaryColIndexes) {
+    ColumnarKeyStoreDataHolder[] columnarKeyStoreDataHolders =
+        new ColumnarKeyStoreDataHolder[blockIndex.length];
+    for (int i = 0; i < columnarKeyStoreDataHolders.length; i++) {
+      byte[] columnarKeyBlockDataTemp = null;
+      int[] columnKeyBlockIndex = null;
+      int[] columnKeyBlockReverseIndexes = null;
+      ColumnarKeyStoreMetadata columnarKeyStoreMetadata = null;
+      int columnarKeyBlockIndex = 0;
+      int[] dataIndex = null;
+      boolean isUnCompressed = true;
+      columnarKeyBlockDataTemp = COMPRESSOR.unCompress(columnarKeyBlockData[blockIndex[i]]);
+      boolean isNoDictionaryBlock =
+          CompressedColumnarKeyStoreUtil.isNoDictionaryBlock(noDictionaryColIndexes, blockIndex[i]);
+      if (!isNoDictionaryBlock && this.columnarStoreInfo.getAggKeyBlock()[blockIndex[i]]) {
+        dataIndex = columnarStoreInfo.getNumberCompressor()
+            .unCompress(columnarUniqueblockKeyBlockIndex[mapOfAggDataIndex.get(blockIndex[i])]);
+        if (!needCompressedData[i]) {
+          columnarKeyBlockDataTemp = UnBlockIndexer
+              .uncompressData(columnarKeyBlockDataTemp, dataIndex,
+                  columnarStoreInfo.getSizeOfEachBlock()[blockIndex[i]]);
+          dataIndex = null;
+        } else {
+          isUnCompressed = false;
+        }
+      }
+      if (!columnarStoreInfo.getIsSorted()[blockIndex[i]]) {
+        columnarKeyBlockIndex = mapOfColumnIndexAndColumnBlockIndex.get(blockIndex[i]);
+        columnKeyBlockIndex = CarbonUtil.getUnCompressColumnIndex(
+            columnarStoreInfo.getKeyBlockIndexLength()[columnarKeyBlockIndex],
+            columnarKeyBlockDataIndex[columnarKeyBlockIndex],
+            columnarStoreInfo.getNumberCompressor());
+        columnKeyBlockReverseIndexes = getColumnIndexForNonFilter(columnKeyBlockIndex);
+      }
+      if (isNoDictionaryBlock) {
+        columnarKeyStoreMetadata = new ColumnarKeyStoreMetadata(0);
+        columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
+        columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndexes);
+        columnarKeyStoreMetadata.setNoDictionaryValColumn(true);
+        columnarKeyStoreMetadata.setUnCompressed(true);
+        columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex[i]]);
+        // The direct surrogate data is read from a byte array that holds both the
+        // length prefix and the surrogate bytes.
+        List<byte[]> noDictionaryValBasedKeyBlockData = CompressedColumnarKeyStoreUtil
+            .readColumnarKeyBlockDataForNoDictionaryCols(columnarKeyBlockDataTemp);
+        columnarKeyStoreDataHolders[i] =
+            new ColumnarKeyStoreDataHolder(noDictionaryValBasedKeyBlockData,
+                columnarKeyStoreMetadata);
+        // Skip the dictionary-key handling below; this holder is complete.
+        continue;
+      }
+      columnarKeyStoreMetadata =
+          new ColumnarKeyStoreMetadata(columnarStoreInfo.getSizeOfEachBlock()[blockIndex[i]]);
+      columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
+      columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex[i]]);
+      columnarKeyStoreMetadata.setDataIndex(dataIndex);
+      columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndexes);
+      columnarKeyStoreMetadata.setUnCompressed(isUnCompressed);
+      columnarKeyStoreDataHolders[i] =
+          new ColumnarKeyStoreDataHolder(columnarKeyBlockDataTemp, columnarKeyStoreMetadata);
+    }
+    return columnarKeyStoreDataHolders;
+  }
+
+  @Override
+  public ColumnarKeyStoreDataHolder getUnCompressedKeyArray(FileHolder fileHolder, int blockIndex,
+      boolean needCompressedData, int[] noDictionaryVals) {
+
+    byte[] columnarKeyBlockDataTemp = null;
+    int[] columnKeyBlockIndex = null;
+    int[] columnKeyBlockReverseIndex = null;
+    ColumnarKeyStoreMetadata columnarKeyStoreMetadata = null;
+    int columnarKeyBlockIndex = 0;
+    int[] dataIndex = null;
+    boolean isUnCompressed = true;
+    columnarKeyBlockDataTemp = COMPRESSOR.unCompress(columnarKeyBlockData[blockIndex]);
+    boolean isNoDictionaryBlock =
+        CompressedColumnarKeyStoreUtil.isNoDictionaryBlock(noDictionaryVals, blockIndex);
+    if (!isNoDictionaryBlock && this.columnarStoreInfo.getAggKeyBlock()[blockIndex]) {
+      dataIndex = columnarStoreInfo.getNumberCompressor()
+          .unCompress(columnarUniqueblockKeyBlockIndex[mapOfAggDataIndex.get(blockIndex)]);
+      if (!needCompressedData) {
+        columnarKeyBlockDataTemp = UnBlockIndexer
+            .uncompressData(columnarKeyBlockDataTemp, dataIndex,
+                columnarStoreInfo.getSizeOfEachBlock()[blockIndex]);
+        dataIndex = null;
+      } else {
+        isUnCompressed = false;
+      }
+    }
+    if (!columnarStoreInfo.getIsSorted()[blockIndex]) {
+      columnarKeyBlockIndex = mapOfColumnIndexAndColumnBlockIndex.get(blockIndex);
+      columnKeyBlockIndex = CarbonUtil.getUnCompressColumnIndex(
+          columnarStoreInfo.getKeyBlockIndexLength()[columnarKeyBlockIndex],
+          columnarKeyBlockDataIndex[columnarKeyBlockIndex],
+          columnarStoreInfo.getNumberCompressor());
+      columnKeyBlockReverseIndex = getColumnIndexForNonFilter(columnKeyBlockIndex);
+    }
+    if (isNoDictionaryBlock) {
+      ColumnarKeyStoreDataHolder colKeystoreDataHolders = CompressedColumnarKeyStoreUtil
+          .createColumnarKeyStoreMetadataForHCDims(blockIndex, columnarKeyBlockDataTemp,
+              columnKeyBlockIndex, columnKeyBlockReverseIndex, columnarStoreInfo);
+      return colKeystoreDataHolders;
+    }
+    columnarKeyStoreMetadata =
+        new ColumnarKeyStoreMetadata(columnarStoreInfo.getSizeOfEachBlock()[blockIndex]);
+    columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
+    columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex]);
+    columnarKeyStoreMetadata.setDataIndex(dataIndex);
+    columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndex);
+    columnarKeyStoreMetadata.setUnCompressed(isUnCompressed);
+    ColumnarKeyStoreDataHolder columnarKeyStoreDataHolders =
+        new ColumnarKeyStoreDataHolder(columnarKeyBlockDataTemp, columnarKeyStoreMetadata);
+    return columnarKeyStoreDataHolders;
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarKeyStoreUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarKeyStoreUtil.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarKeyStoreUtil.java
new file mode 100644
index 0000000..b0d7ff8
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarKeyStoreUtil.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.datastorage.store.impl.key.columnar.compressed;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreDataHolder;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreInfo;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreMetadata;
+
+/**
+ * Utility helper class for managing the processing of columnar key store block.
+ */
+public final class CompressedColumnarKeyStoreUtil {
+
+  private CompressedColumnarKeyStoreUtil() {
+
+  }
+
+  /**
+   * High cardinality dimension rows are sent as a byte array in which each
+   * value is prefixed with its data length (a short), since high cardinality
+   * data is not part of the MDKey/surrogate keys. This method scans the byte
+   * array and strips the short length prefixes.
+   *
+   * @param columnarKeyBlockData length-prefixed key block bytes
+   * @return list of key values without their length prefixes
+   */
+  public static List<byte[]> readColumnarKeyBlockDataForNoDictionaryCols(
+      byte[] columnarKeyBlockData) {
+    List<byte[]> columnarKeyBlockDataList = new ArrayList<byte[]>(50);
+    ByteBuffer noDictionaryValKeyStoreDataHolder = ByteBuffer.allocate(columnarKeyBlockData.length);
+    noDictionaryValKeyStoreDataHolder.put(columnarKeyBlockData);
+    noDictionaryValKeyStoreDataHolder.flip();
+    while (noDictionaryValKeyStoreDataHolder.hasRemaining()) {
+      short dataLength = noDictionaryValKeyStoreDataHolder.getShort();
+      byte[] noDictionaryValKeyData = new byte[dataLength];
+      noDictionaryValKeyStoreDataHolder.get(noDictionaryValKeyData);
+      columnarKeyBlockDataList.add(noDictionaryValKeyData);
+    }
+    return columnarKeyBlockDataList;
+
+  }
+
+  /**
+   * Creates a key store data holder, along with its metadata, for a high
+   * cardinality (no-dictionary) dimension block.
+   *
+   * @param blockIndex
+   * @param columnarKeyBlockData
+   * @param columnKeyBlockIndex
+   * @param columnKeyBlockReverseIndex
+   * @param columnarStoreInfo
+   * @return
+   */
+  public static ColumnarKeyStoreDataHolder createColumnarKeyStoreMetadataForHCDims(int blockIndex,
+      byte[] columnarKeyBlockData, int[] columnKeyBlockIndex, int[] columnKeyBlockReverseIndex,
+      ColumnarKeyStoreInfo columnarStoreInfo) {
+    ColumnarKeyStoreMetadata columnarKeyStoreMetadata;
+    columnarKeyStoreMetadata = new ColumnarKeyStoreMetadata(0);
+    columnarKeyStoreMetadata.setNoDictionaryValColumn(true);
+    columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
+    columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndex);
+    columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex]);
+    columnarKeyStoreMetadata.setUnCompressed(true);
+    List<byte[]> noDictionaryValBasedKeyBlockData = CompressedColumnarKeyStoreUtil
+        .readColumnarKeyBlockDataForNoDictionaryCols(columnarKeyBlockData);
+    ColumnarKeyStoreDataHolder columnarKeyStoreDataHolders =
+        new ColumnarKeyStoreDataHolder(noDictionaryValBasedKeyBlockData, columnarKeyStoreMetadata);
+    return columnarKeyStoreDataHolders;
+  }
+
+  /**
+   * This API determines whether the requested block index is a no-dictionary
+   * column index.
+   *
+   * @param noDictionaryColIndexes
+   * @param blockIndex
+   * @return
+   */
+  public static boolean isNoDictionaryBlock(int[] noDictionaryColIndexes, int blockIndex) {
+    if (null != noDictionaryColIndexes) {
+      for (int noDictionaryValIndex : noDictionaryColIndexes) {
+        if (noDictionaryValIndex == blockIndex) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+}

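For reference, a minimal standalone sketch (not part of this commit) of the
length-prefixed layout that readColumnarKeyBlockDataForNoDictionaryCols expects:
each value is written as a 2-byte short length followed by that many bytes of
data. It assumes the utility class above is on the classpath.

import java.nio.ByteBuffer;
import java.util.List;

import org.apache.carbondata.core.datastorage.store.impl.key.columnar.compressed.CompressedColumnarKeyStoreUtil;

public class NoDictionaryLayoutDemo {
  public static void main(String[] args) {
    byte[] v1 = "abc".getBytes();
    byte[] v2 = "de".getBytes();
    // encode: <short length><bytes> per value, exactly what the reader strips
    ByteBuffer buffer = ByteBuffer.allocate(2 + v1.length + 2 + v2.length);
    buffer.putShort((short) v1.length).put(v1);
    buffer.putShort((short) v2.length).put(v2);
    List<byte[]> values = CompressedColumnarKeyStoreUtil
        .readColumnarKeyBlockDataForNoDictionaryCols(buffer.array());
    // values.get(0) holds the bytes of "abc", values.get(1) those of "de"
  }
}
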
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarFileKeyStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarFileKeyStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarFileKeyStore.java
new file mode 100644
index 0000000..d0b17dc
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarFileKeyStore.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.key.columnar.uncompressed;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreDataHolder;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreInfo;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreMetadata;
+import org.apache.carbondata.core.datastorage.store.columnar.UnBlockIndexer;
+import org.apache.carbondata.core.datastorage.store.impl.key.columnar.AbstractColumnarKeyStore;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+public class UnCompressedColumnarFileKeyStore extends AbstractColumnarKeyStore {
+
+  public UnCompressedColumnarFileKeyStore(ColumnarKeyStoreInfo columnarStoreInfo) {
+    super(columnarStoreInfo, false, null);
+  }
+
+  @Override public ColumnarKeyStoreDataHolder[] getUnCompressedKeyArray(FileHolder fileHolder,
+      int[] blockIndex, boolean[] needCompressedData, int[] noDictionaryColIndexes) {
+    ColumnarKeyStoreDataHolder[] columnarKeyStoreDataHolders =
+        new ColumnarKeyStoreDataHolder[blockIndex.length];
+    byte[] columnarKeyBlockData = null;
+    int[] columnKeyBlockIndex = null;
+    ColumnarKeyStoreMetadata columnarKeyStoreMetadata = null;
+    int columnarKeyBlockIndex = 0;
+    int[] dataIndex = null;
+    int[] columnKeyBlockReverseIndex = null;
+    for (int j = 0; j < columnarKeyStoreDataHolders.length; j++) {
+      columnarKeyBlockData = fileHolder.readByteArray(columnarStoreInfo.getFilePath(),
+          columnarStoreInfo.getKeyBlockOffsets()[blockIndex[j]],
+          columnarStoreInfo.getKeyBlockLengths()[blockIndex[j]]);
+      if (this.columnarStoreInfo.getAggKeyBlock()[blockIndex[j]]) {
+        dataIndex = columnarStoreInfo.getNumberCompressor().unCompress(fileHolder
+            .readByteArray(columnarStoreInfo.getFilePath(),
+                columnarStoreInfo.getDataIndexMapOffsets()[mapOfAggDataIndex.get(blockIndex[j])],
+                columnarStoreInfo.getDataIndexMapLength()[mapOfAggDataIndex.get(blockIndex[j])]));
+        if (!needCompressedData[j]) {
+          columnarKeyBlockData = UnBlockIndexer.uncompressData(columnarKeyBlockData, dataIndex,
+              columnarStoreInfo.getSizeOfEachBlock()[blockIndex[j]]);
+          dataIndex = null;
+        }
+      }
+      if (!columnarStoreInfo.getIsSorted()[blockIndex[j]]) {
+        columnarKeyBlockIndex = mapOfColumnIndexAndColumnBlockIndex.get(blockIndex[j]);
+        columnKeyBlockIndex = CarbonUtil.getUnCompressColumnIndex(
+            columnarStoreInfo.getKeyBlockIndexLength()[columnarKeyBlockIndex], fileHolder
+                .readByteArray(columnarStoreInfo.getFilePath(),
+                    columnarStoreInfo.getKeyBlockIndexOffsets()[columnarKeyBlockIndex],
+                    columnarStoreInfo.getKeyBlockIndexLength()[columnarKeyBlockIndex]),
+            columnarStoreInfo.getNumberCompressor());
+        columnKeyBlockReverseIndex = getColumnIndexForNonFilter(columnKeyBlockIndex);
+      }
+      columnarKeyStoreMetadata =
+          new ColumnarKeyStoreMetadata(columnarStoreInfo.getSizeOfEachBlock()[blockIndex[j]]);
+      columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex[j]]);
+      columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
+      columnarKeyStoreMetadata.setDataIndex(dataIndex);
+      columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndex);
+      columnarKeyStoreDataHolders[j] =
+          new ColumnarKeyStoreDataHolder(columnarKeyBlockData, columnarKeyStoreMetadata);
+    }
+    return columnarKeyStoreDataHolders;
+  }
+
+  @Override
+  public ColumnarKeyStoreDataHolder getUnCompressedKeyArray(FileHolder fileHolder, int blockIndex,
+      boolean needCompressedData, int[] noDictionaryColIndexes) {
+    return null;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarInMemoryStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarInMemoryStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarInMemoryStore.java
new file mode 100644
index 0000000..da69e01
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarInMemoryStore.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.key.columnar.uncompressed;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreDataHolder;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreInfo;
+import org.apache.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreMetadata;
+import org.apache.carbondata.core.datastorage.store.impl.key.columnar.AbstractColumnarKeyStore;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+public class UnCompressedColumnarInMemoryStore extends AbstractColumnarKeyStore {
+
+  public UnCompressedColumnarInMemoryStore(ColumnarKeyStoreInfo columnarStoreInfo,
+      FileHolder fileHolder) {
+    super(columnarStoreInfo, true, fileHolder);
+  }
+
+  @Override public ColumnarKeyStoreDataHolder[] getUnCompressedKeyArray(FileHolder fileHolder,
+      int[] blockIndex, boolean[] needCompressedData, int[] noDictionaryColIndexes) {
+    int columnarKeyBlockIndex = 0;
+    int[] columnIndex = null;
+    ColumnarKeyStoreDataHolder[] columnarKeyStoreDataHolders =
+        new ColumnarKeyStoreDataHolder[blockIndex.length];
+    ColumnarKeyStoreMetadata columnarKeyStoreMetadataTemp = null;
+    for (int i = 0; i < columnarKeyStoreDataHolders.length; i++) {
+      columnarKeyStoreMetadataTemp = new ColumnarKeyStoreMetadata(0);
+      if (!columnarStoreInfo.getIsSorted()[blockIndex[i]]) {
+        columnarKeyBlockIndex = mapOfColumnIndexAndColumnBlockIndex.get(blockIndex[i]);
+        columnIndex = CarbonUtil.getUnCompressColumnIndex(
+            columnarStoreInfo.getKeyBlockIndexLength()[columnarKeyBlockIndex], fileHolder
+                .readByteArray(columnarStoreInfo.getFilePath(),
+                    columnarStoreInfo.getKeyBlockIndexOffsets()[columnarKeyBlockIndex],
+                    columnarStoreInfo.getKeyBlockIndexLength()[columnarKeyBlockIndex]),
+            columnarStoreInfo.getNumberCompressor());
+        columnIndex = getColumnIndexForNonFilter(columnIndex);
+        columnarKeyStoreMetadataTemp.setColumnIndex(columnIndex);
+      }
+      columnarKeyStoreMetadataTemp.setSorted(columnarStoreInfo.getIsSorted()[blockIndex[i]]);
+      columnarKeyStoreDataHolders[i] =
+          new ColumnarKeyStoreDataHolder(columnarKeyBlockData[blockIndex[i]],
+              columnarKeyStoreMetadataTemp);
+    }
+    return columnarKeyStoreDataHolders;
+  }
+
+  @Override
+  public ColumnarKeyStoreDataHolder getUnCompressedKeyArray(FileHolder fileHolder, int blockIndex,
+      boolean needCompressedData, int[] noDictionaryVals) {
+    return null;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/compressed/AbstractCompressedSingleArrayStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/compressed/AbstractCompressedSingleArrayStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/compressed/AbstractCompressedSingleArrayStore.java
new file mode 100644
index 0000000..493b61f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/compressed/AbstractCompressedSingleArrayStore.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.key.compressed;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.NodeKeyStore;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+
+public abstract class AbstractCompressedSingleArrayStore implements NodeKeyStore {
+
+  /**
+   * compressor will be used to compress the data
+   */
+  protected static final Compressor<byte[]> COMPRESSOR =
+      SnappyCompression.SnappyByteCompression.INSTANCE;
+  /**
+   * size of each element
+   */
+  protected final int sizeOfEachElement;
+  /**
+   * data store which will hold the data
+   */
+  protected byte[] datastore;
+  /**
+   * total number of elements
+   */
+  protected int totalNumberOfElements;
+
+  public AbstractCompressedSingleArrayStore(int size, int elementSize) {
+    this(size, elementSize, true);
+  }
+
+  public AbstractCompressedSingleArrayStore(int size, int elementSize, boolean createDataStore) {
+    this.sizeOfEachElement = elementSize;
+    this.totalNumberOfElements = size;
+    if (createDataStore) {
+      datastore = new byte[this.totalNumberOfElements * this.sizeOfEachElement];
+    }
+  }
+
+  /**
+   * This method will be used to insert key to store
+   */
+  @Override public void put(int index, byte[] value) {
+    System.arraycopy(value, 0, datastore, ((index) * sizeOfEachElement), sizeOfEachElement);
+  }
+
+  /**
+   * This method will be used to get the writable key array.
+   * Here the backing data store is compressed and returned as-is,
+   * without any length header.
+   *
+   * @return writable array (compressed)
+   */
+  @Override public byte[] getWritableKeyArray() {
+    // compress the data store
+    byte[] compressedKeys = COMPRESSOR.compress(datastore);
+    return compressedKeys;
+  }
+
+  /**
+   * This method will be used to get the actual key array present in the
+   * store. The backing array is returned in uncompressed form.
+   *
+   * @param fileHolder file holder which will be used to read the file
+   * @return uncompressed keys
+   */
+  @Override public byte[] getBackArray(FileHolder fileHolder) {
+    return COMPRESSOR.unCompress(datastore);
+  }
+
+  /**
+   * This method will be used to get the key array based on index
+   *
+   * @param index      index in store
+   * @param fileHolder file holder will be used to read the file
+   * @return key
+   */
+  @Override public byte[] get(int index, FileHolder fileHolder) {
+    // uncompress the store data
+    byte[] unCompress = COMPRESSOR.unCompress(datastore);
+    // create new array of size of each element
+    byte[] copy = new byte[sizeOfEachElement];
+    // copy array for given index
+    // copy will be done based on the below calculation
+    // e.g. if index is 4 and size of each key is 6 then copy from the
+    // 24th (6*4) index till the 29th index
+    System.arraycopy(unCompress, ((index) * sizeOfEachElement), copy, 0, sizeOfEachElement);
+    return copy;
+  }
+
+  /**
+   * This method will clear the store and create the new empty store
+   */
+  @Override public void clear() {
+    datastore = new byte[this.totalNumberOfElements * this.sizeOfEachElement];
+  }
+}

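For reference, a standalone sketch (not part of this commit) of the fixed-width
slot layout that put() and get() above implement, and that the uncompressed key
stores later in this commit repeat: slot i occupies bytes
[i * sizeOfEachElement, (i + 1) * sizeOfEachElement).

public class SlotLayoutDemo {
  public static void main(String[] args) {
    int sizeOfEachElement = 6;
    byte[] datastore = new byte[3 * sizeOfEachElement]; // room for 3 keys
    byte[] key = {1, 2, 3, 4, 5, 6};
    // put(index = 1, key): copy into bytes 6..11 of the store
    System.arraycopy(key, 0, datastore, 1 * sizeOfEachElement, sizeOfEachElement);
    // get(index = 1): copy bytes 6..11 back out into a fresh array
    byte[] copy = new byte[sizeOfEachElement];
    System.arraycopy(datastore, 1 * sizeOfEachElement, copy, 0, sizeOfEachElement);
  }
}
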
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyFileStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyFileStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyFileStore.java
new file mode 100644
index 0000000..0d113d6
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyFileStore.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.key.compressed;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+public class CompressedSingleArrayKeyFileStore extends AbstractCompressedSingleArrayStore {
+
+  /**
+   * offset, this will be used for seek position
+   */
+  private long offset;
+
+  /**
+   * fully qualified file path
+   */
+  private String filePath;
+
+  /**
+   * length to be read
+   */
+  private int length;
+
+  public CompressedSingleArrayKeyFileStore(int size, int elementSize, long offset, String filePath,
+      int length) {
+    super(size, elementSize, false);
+    this.offset = offset;
+    this.filePath = filePath;
+    this.length = length;
+  }
+
+  /**
+   * This method will be used to get the actual key array present in the
+   * store. It first reads the data from the file based on offset and length,
+   * then uncompresses it to get the actual backing array.
+   *
+   * @param fileHolder file holder which will be used to read the file
+   * @return uncompressed keys
+   */
+  @Override public byte[] getBackArray(FileHolder fileHolder) {
+    if (null != fileHolder) {
+      // read from file based on offset and length; the file holder will read
+      // that many bytes from that offset, which are then uncompressed
+      return COMPRESSOR.unCompress(fileHolder.readByteArray(filePath, offset, length));
+    } else {
+      return new byte[0];
+    }
+  }
+
+  /**
+   * This method will be used to get the key array based on index.
+   * It first reads the data from the file based on offset and length,
+   * uncompresses it to get the actual array, then extracts and returns
+   * the key at the given index.
+   *
+   * @param index      index in store
+   * @param fileHolder file holder will be used to read the file
+   * @return key
+   */
+  @Override public byte[] get(int index, FileHolder fileHolder) {
+    // read from file based on offset and length; the file holder will read
+    // that many bytes from that offset, which are then uncompressed
+    byte[] unCompress = COMPRESSOR.unCompress(fileHolder.readByteArray(filePath, offset, length));
+    // create new array of size of each element
+    byte[] copy = new byte[sizeOfEachElement];
+    // copy array for given index
+    // copy will be done based on the below calculation
+    // e.g. if index is 4 and size of each key is 6 then copy from the
+    // 24th (6*4) index till the 29th index
+    System.arraycopy(unCompress, ((index) * sizeOfEachElement), copy, 0, sizeOfEachElement);
+    return copy;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyInMemoryStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyInMemoryStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyInMemoryStore.java
new file mode 100644
index 0000000..612d434
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyInMemoryStore.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.key.compressed;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+public class CompressedSingleArrayKeyInMemoryStore extends AbstractCompressedSingleArrayStore {
+  /**
+   * @param size
+   * @param elementSize
+   */
+  public CompressedSingleArrayKeyInMemoryStore(int size, int elementSize) {
+    super(size, elementSize);
+  }
+
+  /**
+   * @param size
+   * @param elementSize
+   * @param offset
+   * @param filePath
+   * @param fileHolder
+   * @param length
+   */
+  public CompressedSingleArrayKeyInMemoryStore(int size, int elementSize, long offset,
+      String filePath, FileHolder fileHolder, int length) {
+    this(size, elementSize);
+    datastore = fileHolder.readByteArray(filePath, offset, length);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/uncompressed/AbstractSingleArrayKeyStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/uncompressed/AbstractSingleArrayKeyStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/uncompressed/AbstractSingleArrayKeyStore.java
new file mode 100644
index 0000000..e4141c3
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/uncompressed/AbstractSingleArrayKeyStore.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.key.uncompressed;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.NodeKeyStore;
+
+public abstract class AbstractSingleArrayKeyStore implements NodeKeyStore {
+
+  /**
+   * size of each element
+   */
+  protected final int sizeOfEachElement;
+  /**
+   * total number of elements
+   */
+  protected final int totalNumberOfElements;
+  /**
+   * data store which will hold the data
+   */
+  protected byte[] datastore;
+
+  public AbstractSingleArrayKeyStore(int size, int elementSize) {
+    this.sizeOfEachElement = elementSize;
+    this.totalNumberOfElements = size;
+    datastore = new byte[size * elementSize];
+  }
+
+  /**
+   * This method will be used to insert mdkey to store
+   *
+   * @param index index of mdkey
+   * @param value mdkey
+   */
+  @Override public void put(int index, byte[] value) {
+    System.arraycopy(value, 0, datastore, ((index) * sizeOfEachElement), sizeOfEachElement);
+  }
+
+  /**
+   * This method will be used to get the writable key array.
+   * Here the backing data store is returned directly, without any
+   * length header or compression.
+   *
+   * @return writable array
+   */
+  @Override public byte[] getWritableKeyArray() {
+    // the raw data store is returned as-is (no length header, no compression)
+    return datastore;
+  }
+
+  /**
+   * This method will be used to get the actual key array present in the
+   * store.
+   *
+   * @param fileHolder file holder will be used to read the file
+   * @return uncompressed keys
+   */
+  @Override public byte[] getBackArray(FileHolder fileHolder) {
+    return datastore;
+  }
+
+  /**
+   * This method will be used to get the key array based on index
+   *
+   * @param index      index in store
+   * @param fileHolder file holder will be used to read the file
+   * @return key
+   */
+  @Override public byte[] get(int index, FileHolder fileHolder) {
+    // create new array of size of each element
+    byte[] copy = new byte[sizeOfEachElement];
+
+    // copy array for given index
+    // copy will be done based on the below calculation
+    // e.g. if index is 4 and size of each key is 6 then copy from the
+    // 24th (6*4) index till the 29th index
+    System.arraycopy(datastore, ((index) * sizeOfEachElement), copy, 0, sizeOfEachElement);
+    return copy;
+  }
+
+  /**
+   * This method will clear the store and create the new empty store
+   */
+  @Override public void clear() {
+    datastore = new byte[this.totalNumberOfElements * this.sizeOfEachElement];
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyFileStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyFileStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyFileStore.java
new file mode 100644
index 0000000..2d44245
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyFileStore.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.key.uncompressed;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+public class SingleArrayKeyFileStore extends AbstractSingleArrayKeyStore {
+  /**
+   * offset, this will be used for seek position
+   */
+  private long offset;
+
+  /**
+   * fully qualified file path
+   */
+  private String filePath;
+
+  /**
+   * length to be read
+   */
+  private int length;
+
+  /**
+   * @param size
+   * @param elementSize
+   */
+  public SingleArrayKeyFileStore(int size, int elementSize) {
+    super(size, elementSize);
+  }
+
+  /**
+   * @param size
+   * @param elementSize
+   * @param offset
+   * @param filePath
+   * @param length
+   */
+  public SingleArrayKeyFileStore(int size, int elementSize, long offset, String filePath,
+      int length) {
+    this(size, elementSize);
+    this.offset = offset;
+    this.filePath = filePath;
+    this.length = length;
+    datastore = null;
+  }
+
+  /**
+   * This method will be used to get the actual key array present in the
+   * store. It reads the data from the file based on offset and length and
+   * returns the bytes as read, since this store is uncompressed.
+   *
+   * @param fileHolder file holder which will be used to read the file
+   * @return uncompressed keys
+   */
+  @Override public byte[] getBackArray(FileHolder fileHolder) {
+    if (null != fileHolder) {
+      return fileHolder.readByteArray(filePath, offset, length);
+    } else {
+      return new byte[0];
+    }
+  }
+
+  /**
+   * This method will be used to get the key array based on index. It first
+   * reads the data from the file based on offset and length, then extracts
+   * and returns the key at the given index.
+   *
+   * @param index      index in store
+   * @param fileHolder file holder will be used to read the file
+   * @return key
+   */
+  @Override public byte[] get(int index, FileHolder fileHolder) {
+    // read from file based on offset and length; the file holder will read
+    // that many bytes from that offset
+    byte[] unCompress = fileHolder.readByteArray(filePath, offset, length);
+    // create new array of size of each element
+    byte[] copy = new byte[sizeOfEachElement];
+    // copy array for given index
+    // copy will be done based on the below calculation
+    // e.g. if index is 4 and size of each key is 6 then copy from the
+    // 24th (6*4) index till the 29th index
+    System.arraycopy(unCompress, ((index) * sizeOfEachElement), copy, 0, sizeOfEachElement);
+    return copy;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyInMemoryStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyInMemoryStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyInMemoryStore.java
new file mode 100644
index 0000000..6e0dde3
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyInMemoryStore.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.key.uncompressed;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+public class SingleArrayKeyInMemoryStore extends AbstractSingleArrayKeyStore {
+
+  public SingleArrayKeyInMemoryStore(int size, int elementSize) {
+    super(size, elementSize);
+  }
+
+  public SingleArrayKeyInMemoryStore(int size, int elementSize, long offset, String filePath,
+      FileHolder fileHolder, int length) {
+    this(size, elementSize);
+    datastore = fileHolder.readByteArray(filePath, offset, length);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/util/StoreFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/util/StoreFactory.java b/core/src/main/java/org/apache/carbondata/core/datastorage/util/StoreFactory.java
new file mode 100644
index 0000000..408524f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/util/StoreFactory.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.util;
+
+import org.apache.carbondata.core.datastorage.store.NodeMeasureDataStore;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.datastorage.store.impl.data.compressed.HeavyCompressedDoubleArrayDataInMemoryStore;
+import org.apache.carbondata.core.datastorage.store.impl.data.uncompressed.DoubleArrayDataInMemoryStore;
+
+public final class StoreFactory {
+  /**
+   * value type.
+   */
+  private static StoreType valueType;
+
+  static {
+    valueType = StoreType.HEAVY_VALUE_COMPRESSION;
+  }
+
+  private StoreFactory() {
+
+  }
+
+  public static NodeMeasureDataStore createDataStore(ValueCompressionModel compressionModel) {
+    switch (valueType) {
+      case COMPRESSED_DOUBLE_ARRAY:
+        return new DoubleArrayDataInMemoryStore(compressionModel);
+
+      case HEAVY_VALUE_COMPRESSION:
+        return new HeavyCompressedDoubleArrayDataInMemoryStore(compressionModel);
+      default:
+        return new HeavyCompressedDoubleArrayDataInMemoryStore(compressionModel);
+    }
+  }
+
+  /**
+   * Supported store types.
+   */
+  public enum StoreType {
+    COMPRESSED_SINGLE_ARRAY,
+    COMPRESSED_DOUBLE_ARRAY,
+    HEAVY_VALUE_COMPRESSION
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/KeyGenException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/KeyGenException.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/KeyGenException.java
new file mode 100644
index 0000000..2824715
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/KeyGenException.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.keygenerator;
+
+/**
+ * It can be thrown while generating the key.
+ */
+public class KeyGenException extends Exception {
+
+  private static final long serialVersionUID = 3105132151795358241L;
+
+  public KeyGenException() {
+    super();
+  }
+
+  public KeyGenException(Exception e) {
+    super(e);
+  }
+
+  public KeyGenException(Exception e, String msg) {
+    super(msg, e);
+  }
+
+  public KeyGenException(String msg) {
+    super(msg);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/KeyGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/KeyGenerator.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/KeyGenerator.java
new file mode 100644
index 0000000..dc50b10
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/KeyGenerator.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.keygenerator;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+/**
+ * It generates a single key from multiple keys (typically multiple dimension
+ * keys are combined to form a single key), and it can extract the individual
+ * (dimensional) keys back out of the combined key.
+ */
+public interface KeyGenerator extends Serializable, Comparator<byte[]> {
+  /**
+   * It generates the single key aka byte array from multiple keys.
+   *
+   * @param keys
+   * @return byte array
+   * @throws KeyGenException
+   */
+  byte[] generateKey(long[] keys) throws KeyGenException;
+
+  /**
+   * It generates the single key aka byte array from multiple keys.
+   *
+   * @param keys
+   * @return
+   * @throws KeyGenException
+   */
+  byte[] generateKey(int[] keys) throws KeyGenException;
+
+  /**
+   * It gets array of keys out of single key aka byte array
+   *
+   * @param key
+   * @return array of keys.
+   */
+  long[] getKeyArray(byte[] key);
+
+  /**
+   * It gets array of keys out of single key aka byte array
+   *
+   * @param key
+   * @param offset
+   * @return array of keys.
+   */
+  long[] getKeyArray(byte[] key, int offset);
+
+  /**
+   * It gets array of keys out of single key aka byte array
+   *
+   * @param key
+   * @param maskedByteRanges
+   * @return array of keys
+   */
+  long[] getKeyArray(byte[] key, int[] maskedByteRanges);
+
+  /**
+   * It gets the key in the specified index from the single key aka byte array
+   *
+   * @param key
+   * @param index of key.
+   * @return key
+   */
+  long getKey(byte[] key, int index);
+
+  /**
+   * Set any extra properties if required.
+   */
+  void setProperty(Object key, Object value);
+
+  /**
+   * Gives the key size in number of bytes.
+   */
+  int getKeySizeInBytes();
+
+  /**
+   * It gets the sub key array at the specified index and size from the
+   * single key aka byte array
+   *
+   * @param key
+   * @param index
+   * @param size
+   * @return
+   */
+  long[] getSubKeyArray(byte[] key, int index, int size);
+
+  /**
+   * returns key bytes offset
+   *
+   * @param index
+   * @return
+   */
+  int[] getKeyByteOffsets(int index);
+
+  int compare(byte[] key1, int offset1, int length1, byte[] key2, int offset2, int length2);
+
+  /**
+   * returns the dimension count
+   *
+   * @return
+   */
+  int getDimCount();
+
+  int getStartAndEndKeySizeWithOnlyPrimitives();
+
+  void setStartAndEndKeySizeWithOnlyPrimitives(int startAndEndKeySizeWithPrimitives);
+}
\ No newline at end of file

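For reference, a hedged usage sketch (not part of this commit) of the interface
above, using MultiDimKeyVarLengthGenerator from this package as the concrete
implementation; lens is assumed here to carry the per-dimension key width in
bits, so three 8-bit dimensions pack into a 3-byte MDKey.

import org.apache.carbondata.core.keygenerator.KeyGenException;
import org.apache.carbondata.core.keygenerator.KeyGenerator;
import org.apache.carbondata.core.keygenerator.mdkey.MultiDimKeyVarLengthGenerator;

public class KeyGeneratorDemo {
  public static void main(String[] args) throws KeyGenException {
    KeyGenerator generator = new MultiDimKeyVarLengthGenerator(new int[] {8, 8, 8});
    byte[] mdKey = generator.generateKey(new long[] {5, 2, 1}); // pack 3 surrogates
    long[] surrogates = generator.getKeyArray(mdKey);           // unpack -> {5, 2, 1}
  }
}
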
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/columnar/ColumnarSplitter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/columnar/ColumnarSplitter.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/columnar/ColumnarSplitter.java
new file mode 100644
index 0000000..b2ea154
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/columnar/ColumnarSplitter.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.keygenerator.columnar;
+
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+
+/**
+ * Splits the odometer key into columns. These columns can then be stored in
+ * a columnar storage.
+ */
+public interface ColumnarSplitter {
+  /**
+   * Splits generated MDKey to multiple columns.
+   *
+   * @param key MDKey
+   * @return Multiple columns in 2 dimensional byte array
+   */
+  byte[][] splitKey(byte[] key);
+
+  /**
+   * It generates and splits key to multiple columns
+   *
+   * @param keys
+   * @return
+   * @throws KeyGenException
+   */
+  byte[][] generateAndSplitKey(long[] keys) throws KeyGenException;
+
+  /**
+   * It generates and splits key to multiple columns
+   *
+   * @param keys
+   * @return
+   * @throws KeyGenException
+   */
+  byte[][] generateAndSplitKey(int[] keys) throws KeyGenException;
+
+  /**
+   * Takes the split keys and generates the surrogate key array
+   *
+   * @param key
+   * @return
+   */
+  long[] getKeyArray(byte[][] key);
+
+  /**
+   * Takes the split keys and generates the surrogate key array in bytes
+   *
+   * @param key
+   * @return
+   */
+  byte[] getKeyByteArray(byte[][] key);
+
+  /**
+   * Takes the split keys and generates the surrogate key array in bytes
+   *
+   * @param key
+   * @param columnIndexes indexes deciding which columns are present in the key
+   * @return
+   */
+  byte[] getKeyByteArray(byte[][] key, int[] columnIndexes);
+
+  /**
+   * Takes the split keys and generates the surrogate key array
+   *
+   * @param key
+   * @param columnIndexes indexes deciding which columns are present in the key
+   * @return
+   */
+  long[] getKeyArray(byte[][] key, int[] columnIndexes);
+
+  /**
+   * Below method will be used to get the block size
+   *
+   * @return
+   */
+  int[] getBlockKeySize();
+
+  /**
+   * Below method will be used to get the total key size of the particular blocks
+   *
+   * @param blockIndexes
+   * @return
+   */
+  int getKeySizeByBlock(int[] blockIndexes);
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthEquiSplitGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthEquiSplitGenerator.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthEquiSplitGenerator.java
new file mode 100644
index 0000000..9276fe3
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthEquiSplitGenerator.java
@@ -0,0 +1,244 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.keygenerator.columnar.impl;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+import org.apache.carbondata.core.keygenerator.columnar.ColumnarSplitter;
+import org.apache.carbondata.core.keygenerator.mdkey.MultiDimKeyVarLengthGenerator;
+
+/**
+ * Equi-split implementation of the columnar splitter. It uses the variable
+ * key length generator to generate keys.
+ * Splitting depends on the dimensionsToSplit parameter, which decides how
+ * many dimensions should be present in each column.
+ */
+public class MultiDimKeyVarLengthEquiSplitGenerator extends MultiDimKeyVarLengthGenerator
+    implements ColumnarSplitter {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = -7767757692821917570L;
+
+  private byte dimensionsToSplit;
+
+  private int[][] splitDimArray;
+
+  private int[][] dimBlockArray;
+
+  private int[][][] byteRangesForDims;
+
+  private int[] blockKeySize;
+
+  public MultiDimKeyVarLengthEquiSplitGenerator(int[] lens, byte dimensionsToSplit) {
+    super(lens);
+    this.dimensionsToSplit = dimensionsToSplit;
+    initialize();
+  }
+
+  private void initialize() {
+    byte s = 0;
+    List<Set<Integer>> splitList =
+        new ArrayList<Set<Integer>>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+    Set<Integer> split = new TreeSet<Integer>();
+    splitList.add(split);
+    for (int i = 0; i < byteRangesForKeys.length; i++) {
+      if (s == dimensionsToSplit) {
+        s = 0;
+        split = new TreeSet<Integer>();
+        splitList.add(split);
+      }
+      for (int j = 0; j < byteRangesForKeys[i].length; j++) {
+        for (int j2 = byteRangesForKeys[i][0]; j2 <= byteRangesForKeys[i][1]; j2++) {
+          split.add(j2);
+        }
+      }
+      s++;
+    }
+    List<Integer>[] splits = new List[splitList.size()];
+    int i = 0;
+    for (Set<Integer> splitLocal : splitList) {
+      List<Integer> range = new ArrayList<Integer>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+      for (Integer index : splitLocal) {
+        range.add(index);
+      }
+      splits[i++] = range;
+    }
+    for (int j = 1; j < splits.length; j++) {
+      if (splits[j - 1].get(splits[j - 1].size() - 1) == splits[j].get(0)) {
+        splits[j].remove(0);
+      }
+    }
+    splitDimArray = new int[splits.length][];
+    for (int j = 0; j < splits.length; j++) {
+      int[] a = convertToArray(splits[j]);
+      splitDimArray[j] = a.length > 0 ? new int[] { a[0], a[a.length - 1] } : a;
+    }
+
+    dimBlockArray = new int[byteRangesForKeys.length][];
+    Set<Integer>[] dimBlockSet = new Set[dimBlockArray.length];
+    for (int k = 0; k < byteRangesForKeys.length; k++) {
+      int[] dimRange = byteRangesForKeys[k];
+      Set<Integer> dimBlockPosSet = new TreeSet<Integer>();
+      dimBlockSet[k] = dimBlockPosSet;
+      for (int j = 0; j < splitDimArray.length; j++) {
+        if (dimRange[0] >= splitDimArray[j][0] && dimRange[0] <= splitDimArray[j][1]) {
+          dimBlockPosSet.add(j);
+        }
+        if (dimRange[1] >= splitDimArray[j][0] && dimRange[1] <= splitDimArray[j][1]) {
+          dimBlockPosSet.add(j);
+        }
+      }
+
+    }
+
+    for (int j = 0; j < dimBlockSet.length; j++) {
+      dimBlockArray[j] = convertToArray(dimBlockSet[j]);
+    }
+
+    int[][] splitDimArrayLocalIndexes = new int[splitDimArray.length][];
+    for (int j = 0; j < splitDimArrayLocalIndexes.length; j++) {
+      splitDimArrayLocalIndexes[j] = splitDimArray[j].length > 0 ?
+          new int[] { 0, splitDimArray[j][1] - splitDimArray[j][0] } :
+          new int[0];
+    }
+
+    byteRangesForDims = new int[byteRangesForKeys.length][][];
+    for (int j = 0; j < byteRangesForKeys.length; j++) {
+      if (dimBlockArray[j].length > 1) {
+        int[] bArray1 = splitDimArrayLocalIndexes[dimBlockArray[j][0]];
+        byteRangesForDims[j] = new int[2][2];
+        byteRangesForDims[j][0] =
+            new int[] { bArray1[bArray1.length - 1], bArray1[bArray1.length - 1] };
+        byteRangesForDims[j][1] = new int[] { 0,
+            (byteRangesForKeys[j][byteRangesForKeys[j].length - 1] - byteRangesForKeys[j][0]) - 1 };
+      } else {
+        byteRangesForDims[j] = new int[1][1];
+        int[] bArray1 = splitDimArray[dimBlockArray[j][0]];
+        byteRangesForDims[j][0] = new int[] { byteRangesForKeys[j][0] - bArray1[0],
+            byteRangesForKeys[j][1] - bArray1[0] };
+      }
+    }
+    blockKeySize = new int[splitDimArray.length];
+
+    for (int j = 0; j < blockKeySize.length; j++) {
+      blockKeySize[j] =
+          splitDimArray[j].length > 0 ? splitDimArray[j][1] - splitDimArray[j][0] + 1 : 0;
+    }
+  }
+
+  private int[] convertToArray(List<Integer> list) {
+    int[] ints = new int[list.size()];
+    for (int i = 0; i < ints.length; i++) {
+      ints[i] = list.get(i);
+    }
+    return ints;
+  }
+
+  private int[] convertToArray(Set<Integer> set) {
+    int[] ints = new int[set.size()];
+    int i = 0;
+    for (Iterator iterator = set.iterator(); iterator.hasNext(); ) {
+      ints[i++] = (Integer) iterator.next();
+    }
+    return ints;
+  }
+
+  @Override public byte[][] splitKey(byte[] key) {
+    byte[][] split = new byte[blockKeySize.length][];
+    int copyIndex = 0;
+    for (int i = 0; i < split.length; i++) {
+      split[i] = new byte[blockKeySize[i]];
+      System.arraycopy(key, copyIndex, split[i], 0, split[i].length);
+      copyIndex += blockKeySize[i];
+    }
+    return split;
+  }
+
+  @Override public byte[][] generateAndSplitKey(long[] keys) throws KeyGenException {
+    return splitKey(generateKey(keys));
+  }
+
+  @Override public byte[][] generateAndSplitKey(int[] keys) throws KeyGenException {
+    return splitKey(generateKey(keys));
+  }
+
+  @Override public long[] getKeyArray(byte[][] key) {
+    byte[] fullKey = new byte[getKeySizeInBytes()];
+    int copyIndex = 0;
+    for (int i = 0; i < key.length; i++) {
+      System.arraycopy(key[i], 0, fullKey, copyIndex, key[i].length);
+      copyIndex += key[i].length;
+    }
+    return getKeyArray(fullKey);
+  }
+
+  @Override public byte[] getKeyByteArray(byte[][] key) {
+    byte[] fullKey = new byte[getKeySizeInBytes()];
+    int copyIndex = 0;
+    for (int i = 0; i < key.length; i++) {
+      System.arraycopy(key[i], 0, fullKey, copyIndex, key[i].length);
+      copyIndex += key[i].length;
+    }
+    return fullKey;
+  }
+
+  @Override public byte[] getKeyByteArray(byte[][] key, int[] columnIndexes) {
+    return null;
+  }
+
+  @Override public long[] getKeyArray(byte[][] key, int[] columnIndexes) {
+    return null;
+  }
+
+  public int[] getBlockKeySize() {
+    return blockKeySize;
+  }
+
+  @Override public int getKeySizeByBlock(int[] blockIndexes) {
+    int size = 0;
+
+    for (int i = 0; i < blockIndexes.length; i++) {
+      if (blockIndexes[i] < blockKeySize.length) {
+        size += blockKeySize[blockIndexes[i]];
+      }
+    }
+    return size;
+  }
+
+  @Override public boolean equals(Object obj) {
+    if(!(obj instanceof MultiDimKeyVarLengthEquiSplitGenerator)) {
+      return false;
+    }
+    MultiDimKeyVarLengthEquiSplitGenerator o = (MultiDimKeyVarLengthEquiSplitGenerator)obj;
+    return o.dimensionsToSplit == dimensionsToSplit && super.equals(obj);
+  }
+
+  @Override public int hashCode() {
+    return super.hashCode() + dimensionsToSplit;
+  }
+}

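For reference, a hedged usage sketch (not part of this commit) exercising the
ColumnarSplitter contract through the equi-split generator above; lens is again
assumed to carry per-dimension bit widths, and with dimensionsToSplit = 1 each
8-bit dimension lands in its own single-byte column block.

import org.apache.carbondata.core.keygenerator.KeyGenException;
import org.apache.carbondata.core.keygenerator.columnar.impl.MultiDimKeyVarLengthEquiSplitGenerator;

public class EquiSplitDemo {
  public static void main(String[] args) throws KeyGenException {
    MultiDimKeyVarLengthEquiSplitGenerator splitter =
        new MultiDimKeyVarLengthEquiSplitGenerator(new int[] {8, 8, 8}, (byte) 1);
    byte[][] columns = splitter.generateAndSplitKey(new long[] {5, 2, 1});
    long[] surrogates = splitter.getKeyArray(columns); // reassemble -> {5, 2, 1}
  }
}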


[36/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/CarbonMergerUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonMergerUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonMergerUtil.java
new file mode 100644
index 0000000..9037e0d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonMergerUtil.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.util;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+/**
+ * Util class for merge activities of 2 loads.
+ */
+public class CarbonMergerUtil {
+
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(CarbonMergerUtil.class.getName());
+
+  public static int[] getCardinalityFromLevelMetadata(String path, String tableName) {
+    int[] localCardinality = null;
+    try {
+      localCardinality = CarbonUtil.getCardinalityFromLevelMetadataFile(
+          path + '/' + CarbonCommonConstants.LEVEL_METADATA_FILE + tableName + ".metadata");
+    } catch (CarbonUtilException e) {
+      LOGGER.error("Error occurred :: " + e.getMessage());
+    }
+
+    return localCardinality;
+  }
+
+}
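A hedged usage sketch of the helper above; the store path and table name are hypothetical, and a null result means the level metadata file could not be read (the error is already logged inside the method):

    // hypothetical path and table name, for illustration only
    int[] cardinality = CarbonMergerUtil
        .getCardinalityFromLevelMetadata("/tmp/carbonstore/sales/Fact/Part0/Segment_0", "sales");
    if (null == cardinality) {
      // abort or retry the merge; details are in the carbon log
    }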

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/CarbonMetadataUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonMetadataUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonMetadataUtil.java
new file mode 100644
index 0000000..de0ea44
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonMetadataUtil.java
@@ -0,0 +1,450 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.util;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.metadata.index.BlockIndexInfo;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.metadata.BlockletInfoColumnar;
+import org.apache.carbondata.core.metadata.ValueEncoderMeta;
+import org.apache.carbondata.format.BlockIndex;
+import org.apache.carbondata.format.BlockletBTreeIndex;
+import org.apache.carbondata.format.BlockletIndex;
+import org.apache.carbondata.format.BlockletInfo;
+import org.apache.carbondata.format.BlockletMinMaxIndex;
+import org.apache.carbondata.format.ChunkCompressionMeta;
+import org.apache.carbondata.format.ColumnSchema;
+import org.apache.carbondata.format.CompressionCodec;
+import org.apache.carbondata.format.DataChunk;
+import org.apache.carbondata.format.Encoding;
+import org.apache.carbondata.format.FileFooter;
+import org.apache.carbondata.format.IndexHeader;
+import org.apache.carbondata.format.PresenceMeta;
+import org.apache.carbondata.format.SegmentInfo;
+import org.apache.carbondata.format.SortState;
+
+/**
+ * Util class to convert to Thrift metadata classes
+ */
+public class CarbonMetadataUtil {
+
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(CarbonMetadataUtil.class.getName());
+
+  /**
+   * It converts a list of BlockletInfoColumnar objects to the FileFooter thrift object
+   *
+   * @param infoList          blocklet info list
+   * @param numCols           number of columns
+   * @param cardinalities     cardinality of each column
+   * @param columnSchemaList  schema of all columns in the table
+   * @param segmentProperties segment properties
+   * @return FileFooter thrift object
+   */
+  public static FileFooter convertFileFooter(List<BlockletInfoColumnar> infoList, int numCols,
+      int[] cardinalities, List<ColumnSchema> columnSchemaList,
+      SegmentProperties segmentProperties) throws IOException {
+
+    SegmentInfo segmentInfo = new SegmentInfo();
+    segmentInfo.setNum_cols(columnSchemaList.size());
+    segmentInfo.setColumn_cardinalities(CarbonUtil.convertToIntegerList(cardinalities));
+
+    FileFooter footer = new FileFooter();
+    footer.setNum_rows(getTotalNumberOfRows(infoList));
+    footer.setSegment_info(segmentInfo);
+    for (BlockletInfoColumnar info : infoList) {
+      footer.addToBlocklet_index_list(getBlockletIndex(info));
+    }
+    footer.setTable_columns(columnSchemaList);
+    for (BlockletInfoColumnar info : infoList) {
+      footer.addToBlocklet_info_list(getBlockletInfo(info, columnSchemaList, segmentProperties));
+    }
+    return footer;
+  }
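As a shape check, a degenerate sketch of the conversion above: even with zero blocklets the footer still carries the row count, schema and cardinality. This assumes a SegmentProperties instance is already in scope; constructing one is out of scope here:

    List<BlockletInfoColumnar> infoList = new ArrayList<BlockletInfoColumnar>();
    List<ColumnSchema> schema = new ArrayList<ColumnSchema>();  // thrift column schemas
    FileFooter footer = CarbonMetadataUtil
        .convertFileFooter(infoList, 0, new int[0], schema, segmentProperties);
    // footer.getNum_rows() == 0; blocklet index and info lists stay empty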
+
+  private static BlockletIndex getBlockletIndex(
+      org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletIndex info) {
+    BlockletMinMaxIndex blockletMinMaxIndex = new BlockletMinMaxIndex();
+
+    for (int i = 0; i < info.getMinMaxIndex().getMaxValues().length; i++) {
+      blockletMinMaxIndex.addToMax_values(ByteBuffer.wrap(info.getMinMaxIndex().getMaxValues()[i]));
+      blockletMinMaxIndex.addToMin_values(ByteBuffer.wrap(info.getMinMaxIndex().getMinValues()[i]));
+    }
+    BlockletBTreeIndex blockletBTreeIndex = new BlockletBTreeIndex();
+    blockletBTreeIndex.setStart_key(info.getBtreeIndex().getStartKey());
+    blockletBTreeIndex.setEnd_key(info.getBtreeIndex().getEndKey());
+    BlockletIndex blockletIndex = new BlockletIndex();
+    blockletIndex.setMin_max_index(blockletMinMaxIndex);
+    blockletIndex.setB_tree_index(blockletBTreeIndex);
+    return blockletIndex;
+  }
+
+  /**
+   * Get the total number of rows for the file.
+   *
+   * @param infoList blocklet info list
+   * @return total number of rows across all blocklets
+   */
+  private static long getTotalNumberOfRows(List<BlockletInfoColumnar> infoList) {
+    long numberOfRows = 0;
+    for (BlockletInfoColumnar info : infoList) {
+      numberOfRows += info.getNumberOfKeys();
+    }
+    return numberOfRows;
+  }
+
+  private static BlockletIndex getBlockletIndex(BlockletInfoColumnar info) {
+
+    BlockletMinMaxIndex blockletMinMaxIndex = new BlockletMinMaxIndex();
+    for (byte[] max : info.getColumnMaxData()) {
+      blockletMinMaxIndex.addToMax_values(ByteBuffer.wrap(max));
+    }
+    for (byte[] min : info.getColumnMinData()) {
+      blockletMinMaxIndex.addToMin_values(ByteBuffer.wrap(min));
+    }
+    BlockletBTreeIndex blockletBTreeIndex = new BlockletBTreeIndex();
+    blockletBTreeIndex.setStart_key(info.getStartKey());
+    blockletBTreeIndex.setEnd_key(info.getEndKey());
+
+    BlockletIndex blockletIndex = new BlockletIndex();
+    blockletIndex.setMin_max_index(blockletMinMaxIndex);
+    blockletIndex.setB_tree_index(blockletBTreeIndex);
+    return blockletIndex;
+  }
+
+  private static BlockletInfo getBlockletInfo(BlockletInfoColumnar blockletInfoColumnar,
+      List<ColumnSchema> columnSchema,
+      SegmentProperties segmentProperties) throws IOException {
+
+    BlockletInfo blockletInfo = new BlockletInfo();
+    blockletInfo.setNum_rows(blockletInfoColumnar.getNumberOfKeys());
+
+    List<DataChunk> colDataChunks = new ArrayList<DataChunk>();
+    blockletInfoColumnar.getKeyLengths();
+    int j = 0;
+    int aggregateIndex = 0;
+    boolean[] isSortedKeyColumn = blockletInfoColumnar.getIsSortedKeyColumn();
+    boolean[] aggKeyBlock = blockletInfoColumnar.getAggKeyBlock();
+    boolean[] colGrpblock = blockletInfoColumnar.getColGrpBlocks();
+    for (int i = 0; i < blockletInfoColumnar.getKeyLengths().length; i++) {
+      DataChunk dataChunk = new DataChunk();
+      dataChunk.setChunk_meta(getChunkCompressionMeta());
+      List<Encoding> encodings = new ArrayList<Encoding>();
+      if (containsEncoding(i, Encoding.DICTIONARY, columnSchema, segmentProperties)) {
+        encodings.add(Encoding.DICTIONARY);
+      }
+      if (containsEncoding(i, Encoding.DIRECT_DICTIONARY, columnSchema, segmentProperties)) {
+        encodings.add(Encoding.DIRECT_DICTIONARY);
+      }
+      dataChunk.setRowMajor(colGrpblock[i]);
+      //TODO : once the schema PR is merged, the column ids need to be passed here.
+      dataChunk.setColumn_ids(new ArrayList<Integer>());
+      dataChunk.setData_page_length(blockletInfoColumnar.getKeyLengths()[i]);
+      dataChunk.setData_page_offset(blockletInfoColumnar.getKeyOffSets()[i]);
+      if (aggKeyBlock[i]) {
+        dataChunk.setRle_page_offset(blockletInfoColumnar.getDataIndexMapOffsets()[aggregateIndex]);
+        dataChunk.setRle_page_length(blockletInfoColumnar.getDataIndexMapLength()[aggregateIndex]);
+        encodings.add(Encoding.RLE);
+        aggregateIndex++;
+      }
+      dataChunk
+          .setSort_state(isSortedKeyColumn[i] ? SortState.SORT_EXPLICIT : SortState.SORT_NATIVE);
+
+      if (!isSortedKeyColumn[i]) {
+        dataChunk.setRowid_page_offset(blockletInfoColumnar.getKeyBlockIndexOffSets()[j]);
+        dataChunk.setRowid_page_length(blockletInfoColumnar.getKeyBlockIndexLength()[j]);
+        encodings.add(Encoding.INVERTED_INDEX);
+        j++;
+      }
+
+      //TODO : right now the encodings are decided at runtime; change to use these encoders.
+      dataChunk.setEncoders(encodings);
+
+      colDataChunks.add(dataChunk);
+    }
+
+    for (int i = 0; i < blockletInfoColumnar.getMeasureLength().length; i++) {
+      DataChunk dataChunk = new DataChunk();
+      dataChunk.setChunk_meta(getChunkCompressionMeta());
+      dataChunk.setRowMajor(false);
+      //TODO : once the schema PR is merged, the column ids need to be passed here.
+      dataChunk.setColumn_ids(new ArrayList<Integer>());
+      dataChunk.setData_page_length(blockletInfoColumnar.getMeasureLength()[i]);
+      dataChunk.setData_page_offset(blockletInfoColumnar.getMeasureOffset()[i]);
+      //TODO : right now the encodings are decided at runtime; change to use these encoders.
+      List<Encoding> encodings = new ArrayList<Encoding>();
+      encodings.add(Encoding.DELTA);
+      dataChunk.setEncoders(encodings);
+      //TODO : writing a dummy presence meta; the actual presence meta needs to be set here
+      PresenceMeta presenceMeta = new PresenceMeta();
+      presenceMeta.setPresent_bit_streamIsSet(true);
+      presenceMeta
+          .setPresent_bit_stream(blockletInfoColumnar.getMeasureNullValueIndex()[i].toByteArray());
+      dataChunk.setPresence(presenceMeta);
+      //TODO : Need to write ValueCompression meta here.
+      List<ByteBuffer> encoderMetaList = new ArrayList<ByteBuffer>();
+      encoderMetaList.add(ByteBuffer.wrap(serializeEncoderMeta(
+          createValueEncoderMeta(blockletInfoColumnar.getCompressionModel(), i))));
+      dataChunk.setEncoder_meta(encoderMetaList);
+      colDataChunks.add(dataChunk);
+    }
+    blockletInfo.setColumn_data_chunks(colDataChunks);
+
+    return blockletInfo;
+  }
+
+  /**
+   * @param blockIndex        index of the dimension block
+   * @param encoding          encoding to look for
+   * @param columnSchemas     schema of all columns
+   * @param segmentProperties segment properties
+   * @return true if the given encoding is present in the column
+   */
+  private static boolean containsEncoding(int blockIndex, Encoding encoding,
+      List<ColumnSchema> columnSchemas, SegmentProperties segmentProperties) {
+    Set<Integer> dimOrdinals = segmentProperties.getDimensionOrdinalForBlock(blockIndex);
+    //column groups will always have dictionary encoding
+    if (dimOrdinals.size() > 1 && Encoding.DICTIONARY == encoding) {
+      return true;
+    }
+    for (Integer dimOrdinal : dimOrdinals) {
+      if (columnSchemas.get(dimOrdinal).encoders.contains(encoding)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private static byte[] serializeEncoderMeta(ValueEncoderMeta encoderMeta) throws IOException {
+    // TODO : should remove the unnecessary fields.
+    ByteArrayOutputStream aos = new ByteArrayOutputStream();
+    ObjectOutputStream objStream = new ObjectOutputStream(aos);
+    objStream.writeObject(encoderMeta);
+    objStream.close();
+    return aos.toByteArray();
+  }
+
+  private static ValueEncoderMeta createValueEncoderMeta(ValueCompressionModel compressionModel,
+      int index) {
+    ValueEncoderMeta encoderMeta = new ValueEncoderMeta();
+    encoderMeta.setMaxValue(compressionModel.getMaxValue()[index]);
+    encoderMeta.setMinValue(compressionModel.getMinValue()[index]);
+    encoderMeta.setDataTypeSelected(compressionModel.getDataTypeSelected()[index]);
+    encoderMeta.setDecimal(compressionModel.getDecimal()[index]);
+    encoderMeta.setType(compressionModel.getType()[index]);
+    encoderMeta.setUniqueValue(compressionModel.getUniqueValue()[index]);
+    return encoderMeta;
+  }
+
+  /**
+   * Right now it is set to default values. We may use this in future
+   */
+  private static ChunkCompressionMeta getChunkCompressionMeta() {
+    ChunkCompressionMeta chunkCompressionMeta = new ChunkCompressionMeta();
+    chunkCompressionMeta.setCompression_codec(CompressionCodec.SNAPPY);
+    chunkCompressionMeta.setTotal_compressed_size(0);
+    chunkCompressionMeta.setTotal_uncompressed_size(0);
+    return chunkCompressionMeta;
+  }
+
+  /**
+   * It converts the FileFooter thrift object to a list of BlockletInfoColumnar objects
+   *
+   * @param footer file footer thrift object
+   * @return list of BlockletInfoColumnar
+   */
+  public static List<BlockletInfoColumnar> convertBlockletInfo(FileFooter footer)
+      throws IOException {
+    List<BlockletInfoColumnar> listOfNodeInfo =
+        new ArrayList<BlockletInfoColumnar>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+    for (BlockletInfo blockletInfo : footer.getBlocklet_info_list()) {
+      BlockletInfoColumnar blockletInfoColumnar = new BlockletInfoColumnar();
+      blockletInfoColumnar.setNumberOfKeys(blockletInfo.getNum_rows());
+      List<DataChunk> columnChunks = blockletInfo.getColumn_data_chunks();
+      List<DataChunk> dictChunks = new ArrayList<DataChunk>();
+      List<DataChunk> nonDictColChunks = new ArrayList<DataChunk>();
+      for (DataChunk dataChunk : columnChunks) {
+        if (dataChunk.getEncoders().get(0).equals(Encoding.DICTIONARY)) {
+          dictChunks.add(dataChunk);
+        } else {
+          nonDictColChunks.add(dataChunk);
+        }
+      }
+      int[] keyLengths = new int[dictChunks.size()];
+      long[] keyOffSets = new long[dictChunks.size()];
+      long[] keyBlockIndexOffsets = new long[dictChunks.size()];
+      int[] keyBlockIndexLens = new int[dictChunks.size()];
+      long[] indexMapOffsets = new long[dictChunks.size()];
+      int[] indexMapLens = new int[dictChunks.size()];
+      boolean[] sortState = new boolean[dictChunks.size()];
+      int i = 0;
+      for (DataChunk dataChunk : dictChunks) {
+        keyLengths[i] = dataChunk.getData_page_length();
+        keyOffSets[i] = dataChunk.getData_page_offset();
+        keyBlockIndexOffsets[i] = dataChunk.getRowid_page_offset();
+        keyBlockIndexLens[i] = dataChunk.getRowid_page_length();
+        indexMapOffsets[i] = dataChunk.getRle_page_offset();
+        indexMapLens[i] = dataChunk.getRle_page_length();
+        sortState[i] = dataChunk.getSort_state().equals(SortState.SORT_EXPLICIT);
+        i++;
+      }
+      blockletInfoColumnar.setKeyLengths(keyLengths);
+      blockletInfoColumnar.setKeyOffSets(keyOffSets);
+      blockletInfoColumnar.setKeyBlockIndexOffSets(keyBlockIndexOffsets);
+      blockletInfoColumnar.setKeyBlockIndexLength(keyBlockIndexLens);
+      blockletInfoColumnar.setDataIndexMapOffsets(indexMapOffsets);
+      blockletInfoColumnar.setDataIndexMapLength(indexMapLens);
+      blockletInfoColumnar.setIsSortedKeyColumn(sortState);
+
+      int[] msrLens = new int[nonDictColChunks.size()];
+      long[] msrOffsets = new long[nonDictColChunks.size()];
+      ValueEncoderMeta[] encoderMetas = new ValueEncoderMeta[nonDictColChunks.size()];
+      i = 0;
+      for (DataChunk msrChunk : nonDictColChunks) {
+        msrLens[i] = msrChunk.getData_page_length();
+        msrOffsets[i] = msrChunk.getData_page_offset();
+        encoderMetas[i] = deserializeValueEncoderMeta(msrChunk.getEncoder_meta().get(0));
+        i++;
+      }
+      blockletInfoColumnar.setMeasureLength(msrLens);
+      blockletInfoColumnar.setMeasureOffset(msrOffsets);
+      blockletInfoColumnar.setCompressionModel(getValueCompressionModel(encoderMetas));
+      listOfNodeInfo.add(blockletInfoColumnar);
+    }
+
+    setBlockletIndex(footer, listOfNodeInfo);
+    return listOfNodeInfo;
+  }
+
+  private static ValueEncoderMeta deserializeValueEncoderMeta(ByteBuffer byteBuffer)
+      throws IOException {
+    ByteArrayInputStream bis = new ByteArrayInputStream(byteBuffer.array());
+    ObjectInputStream objStream = new ObjectInputStream(bis);
+    ValueEncoderMeta encoderMeta = null;
+    try {
+      encoderMeta = (ValueEncoderMeta) objStream.readObject();
+    } catch (ClassNotFoundException e) {
+      LOGGER.error("Error while reading ValueEncoderMeta: " + e.getMessage());
+    } finally {
+      // release the object stream; the underlying byte array stream needs no cleanup
+      objStream.close();
+    }
+    return encoderMeta;
+  }
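serializeEncoderMeta and deserializeValueEncoderMeta above form a plain Java-serialization round trip; a minimal sketch, callable only from inside this class since both helpers are private:

    ValueEncoderMeta meta = new ValueEncoderMeta();
    meta.setMaxValue(100L);
    meta.setMinValue(1L);
    byte[] bytes = serializeEncoderMeta(meta);                   // Java object serialization
    ValueEncoderMeta copy = deserializeValueEncoderMeta(ByteBuffer.wrap(bytes));
    // copy.getMaxValue() is the boxed 100L again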
+
+  private static ValueCompressionModel getValueCompressionModel(ValueEncoderMeta[] encoderMetas) {
+    Object[] maxValue = new Object[encoderMetas.length];
+    Object[] minValue = new Object[encoderMetas.length];
+    int[] decimalLength = new int[encoderMetas.length];
+    Object[] uniqueValue = new Object[encoderMetas.length];
+    char[] aggType = new char[encoderMetas.length];
+    byte[] dataTypeSelected = new byte[encoderMetas.length];
+    for (int i = 0; i < encoderMetas.length; i++) {
+      maxValue[i] = encoderMetas[i].getMaxValue();
+      minValue[i] = encoderMetas[i].getMinValue();
+      decimalLength[i] = encoderMetas[i].getDecimal();
+      uniqueValue[i] = encoderMetas[i].getUniqueValue();
+      aggType[i] = encoderMetas[i].getType();
+      dataTypeSelected[i] = encoderMetas[i].getDataTypeSelected();
+    }
+    return ValueCompressionUtil
+        .getValueCompressionModel(maxValue, minValue, decimalLength, uniqueValue, aggType,
+            dataTypeSelected);
+  }
+
+  private static void setBlockletIndex(FileFooter footer,
+      List<BlockletInfoColumnar> listOfNodeInfo) {
+    List<BlockletIndex> blockletIndexList = footer.getBlocklet_index_list();
+    for (int i = 0; i < blockletIndexList.size(); i++) {
+      BlockletBTreeIndex bTreeIndexList = blockletIndexList.get(i).getB_tree_index();
+      BlockletMinMaxIndex minMaxIndexList = blockletIndexList.get(i).getMin_max_index();
+
+      listOfNodeInfo.get(i).setStartKey(bTreeIndexList.getStart_key());
+      listOfNodeInfo.get(i).setEndKey(bTreeIndexList.getEnd_key());
+      byte[][] min = new byte[minMaxIndexList.getMin_values().size()][];
+      byte[][] max = new byte[minMaxIndexList.getMax_values().size()][];
+      for (int j = 0; j < minMaxIndexList.getMax_valuesSize(); j++) {
+        min[j] = minMaxIndexList.getMin_values().get(j).array();
+        max[j] = minMaxIndexList.getMax_values().get(j).array();
+      }
+      listOfNodeInfo.get(i).setColumnMaxData(max);
+      // the min values were read above but never stored; set them as well
+      listOfNodeInfo.get(i).setColumnMinData(min);
+    }
+  }
+
+  /**
+   * Below method will be used to get the index header
+   *
+   * @param columnCardinality cardinality of each column
+   * @param columnSchemaList  list of column present in the table
+   * @return Index header object
+   */
+  public static IndexHeader getIndexHeader(int[] columnCardinality,
+      List<ColumnSchema> columnSchemaList) {
+    // create segment info object
+    SegmentInfo segmentInfo = new SegmentInfo();
+    // set the number of columns
+    segmentInfo.setNum_cols(columnSchemaList.size());
+    // setting the column cardinality
+    segmentInfo.setColumn_cardinalities(CarbonUtil.convertToIntegerList(columnCardinality));
+    // create index header object
+    IndexHeader indexHeader = new IndexHeader();
+    // set the segment info
+    indexHeader.setSegment_info(segmentInfo);
+    // set the column names
+    indexHeader.setTable_columns(columnSchemaList);
+    return indexHeader;
+  }
+
+  /**
+   * Below method will be used to get the block index info thrift object for each block
+   * present in the segment
+   *
+   * @param blockIndexInfoList block index info list
+   * @return list of block index
+   */
+  public static List<BlockIndex> getBlockIndexInfo(List<BlockIndexInfo> blockIndexInfoList) {
+    List<BlockIndex> thriftBlockIndexList = new ArrayList<BlockIndex>();
+    BlockIndex blockIndex = null;
+    // below code to create block index info object for each block
+    for (BlockIndexInfo blockIndexInfo : blockIndexInfoList) {
+      blockIndex = new BlockIndex();
+      blockIndex.setNum_rows(blockIndexInfo.getNumberOfRows());
+      // the offset must come from the block's offset, not its row count
+      blockIndex.setOffset(blockIndexInfo.getOffset());
+      blockIndex.setFile_name(blockIndexInfo.getFileName());
+      blockIndex.setBlock_index(getBlockletIndex(blockIndexInfo.getBlockletIndex()));
+      thriftBlockIndexList.add(blockIndex);
+    }
+    return thriftBlockIndexList;
+  }
+}
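A minimal sketch of the index-header helper near the end of the class, using only the API shown in this diff:

    // two-column table: per-column cardinality plus thrift column schemas
    int[] cardinality = new int[] { 100, 1000 };
    List<ColumnSchema> schema = new ArrayList<ColumnSchema>();
    IndexHeader header = CarbonMetadataUtil.getIndexHeader(cardinality, schema);
    // header.getSegment_info().getNum_cols() == schema.size()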

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
new file mode 100644
index 0000000..befd906
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
@@ -0,0 +1,494 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.util;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Properties;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+public final class CarbonProperties {
+  /**
+   * Attribute for Carbon LOGGER.
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(CarbonProperties.class.getName());
+
+  /**
+   * class instance.
+   */
+  private static final CarbonProperties CARBONPROPERTIESINSTANCE = new CarbonProperties();
+
+  /**
+   * properties object holding all configured carbon properties.
+   */
+  private Properties carbonProperties;
+
+  /**
+   * Private constructor; this will call the load properties method to load all
+   * the carbon properties into memory.
+   */
+  private CarbonProperties() {
+    carbonProperties = new Properties();
+    loadProperties();
+    validateAndLoadDefaultProperties();
+  }
+
+  /**
+   * This method will be responsible for get this class instance
+   *
+   * @return carbon properties instance
+   */
+  public static CarbonProperties getInstance() {
+    return CARBONPROPERTIESINSTANCE;
+  }
+
+  /**
+   * This method validates the loaded properties and loads default
+   * values in case of wrong values.
+   */
+  private void validateAndLoadDefaultProperties() {
+    if (null == carbonProperties.getProperty(CarbonCommonConstants.STORE_LOCATION)) {
+      carbonProperties.setProperty(CarbonCommonConstants.STORE_LOCATION,
+          CarbonCommonConstants.STORE_LOCATION_DEFAULT_VAL);
+    }
+
+    validateBlockletSize();
+    validateMaxFileSize();
+    validateNumCores();
+    validateNumCoresBlockSort();
+    validateSortSize();
+    validateBadRecordsLocation();
+    validateHighCardinalityIdentify();
+    validateHighCardinalityThreshold();
+    validateHighCardinalityInRowCountPercentage();
+  }
+
+  private void validateBadRecordsLocation() {
+    String badRecordsLocation =
+        carbonProperties.getProperty(CarbonCommonConstants.CARBON_BADRECORDS_LOC);
+    if (null == badRecordsLocation || badRecordsLocation.length() == 0) {
+      carbonProperties.setProperty(CarbonCommonConstants.CARBON_BADRECORDS_LOC,
+          CarbonCommonConstants.CARBON_BADRECORDS_LOC_DEFAULT_VAL);
+    }
+  }
+
+  /**
+   * This method validates the blocklet size
+   */
+  private void validateBlockletSize() {
+    String blockletSizeStr = carbonProperties.getProperty(CarbonCommonConstants.BLOCKLET_SIZE,
+        CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL);
+    try {
+      int blockletSize = Integer.parseInt(blockletSizeStr);
+
+      if (blockletSize < CarbonCommonConstants.BLOCKLET_SIZE_MIN_VAL
+          || blockletSize > CarbonCommonConstants.BLOCKLET_SIZE_MAX_VAL) {
+        LOGGER.info("The blocklet size value \"" + blockletSizeStr
+                + "\" is invalid. Using the default value \""
+                + CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL);
+        carbonProperties.setProperty(CarbonCommonConstants.BLOCKLET_SIZE,
+            CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL);
+      }
+    } catch (NumberFormatException e) {
+      LOGGER.info("The blocklet size value \"" + blockletSizeStr
+              + "\" is invalid. Using the default value \""
+              + CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL);
+      carbonProperties.setProperty(CarbonCommonConstants.BLOCKLET_SIZE,
+          CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL);
+    }
+  }
+
+  /**
+   * This method validates the max file size (TODO: confirm whether the unit is
+   * the maximum number of blocklets per file).
+   */
+  private void validateMaxFileSize() {
+    String maxFileSizeStr = carbonProperties.getProperty(CarbonCommonConstants.MAX_FILE_SIZE,
+        CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL);
+    try {
+      int maxFileSize = Integer.parseInt(maxFileSizeStr);
+
+      if (maxFileSize < CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL_MIN_VAL
+          || maxFileSize > CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL_MAX_VAL) {
+        LOGGER.info("The max file size value \"" + maxFileSizeStr
+                + "\" is invalid. Using the default value \""
+                + CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL);
+        carbonProperties.setProperty(CarbonCommonConstants.MAX_FILE_SIZE,
+            CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL);
+      }
+    } catch (NumberFormatException e) {
+      LOGGER.info("The max file size value \"" + maxFileSizeStr
+              + "\" is invalid. Using the default value \""
+              + CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL);
+
+      carbonProperties.setProperty(CarbonCommonConstants.MAX_FILE_SIZE,
+          CarbonCommonConstants.MAX_FILE_SIZE_DEFAULT_VAL);
+    }
+  }
+
+  /**
+   * This method validates the number of cores specified
+   */
+  private void validateNumCores() {
+    String numCoresStr = carbonProperties
+        .getProperty(CarbonCommonConstants.NUM_CORES, CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
+    try {
+      int numCores = Integer.parseInt(numCoresStr);
+
+      if (numCores < CarbonCommonConstants.NUM_CORES_MIN_VAL
+          || numCores > CarbonCommonConstants.NUM_CORES_MAX_VAL) {
+        LOGGER.info("The num Cores  value \"" + numCoresStr
+            + "\" is invalid. Using the default value \""
+            + CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
+        carbonProperties.setProperty(CarbonCommonConstants.NUM_CORES,
+            CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
+      }
+    } catch (NumberFormatException e) {
+      LOGGER.info("The num Cores  value \"" + numCoresStr
+          + "\" is invalid. Using the default value \""
+          + CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
+      carbonProperties.setProperty(CarbonCommonConstants.NUM_CORES,
+          CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
+    }
+  }
+
+  /**
+   * This method validates the number of cores specified for the mdk block sort
+   */
+  private void validateNumCoresBlockSort() {
+    String numCoresStr = carbonProperties
+        .getProperty(CarbonCommonConstants.NUM_CORES_BLOCK_SORT,
+            CarbonCommonConstants.NUM_CORES_BLOCK_SORT_DEFAULT_VAL);
+    try {
+      int numCores = Integer.parseInt(numCoresStr);
+
+      if (numCores < CarbonCommonConstants.NUM_CORES_BLOCK_SORT_MIN_VAL
+          || numCores > CarbonCommonConstants.NUM_CORES_BLOCK_SORT_MAX_VAL) {
+        LOGGER.info("The num cores value \"" + numCoresStr
+            + "\" for block sort is invalid. Using the default value \""
+            + CarbonCommonConstants.NUM_CORES_BLOCK_SORT_DEFAULT_VAL);
+        carbonProperties.setProperty(CarbonCommonConstants.NUM_CORES_BLOCK_SORT,
+            CarbonCommonConstants.NUM_CORES_BLOCK_SORT_DEFAULT_VAL);
+      }
+    } catch (NumberFormatException e) {
+      LOGGER.info("The num cores value \"" + numCoresStr
+          + "\" for block sort is invalid. Using the default value \""
+          + CarbonCommonConstants.NUM_CORES_BLOCK_SORT_DEFAULT_VAL);
+      carbonProperties.setProperty(CarbonCommonConstants.NUM_CORES_BLOCK_SORT,
+          CarbonCommonConstants.NUM_CORES_BLOCK_SORT_DEFAULT_VAL);
+    }
+  }
+
+  /**
+   * This method validates the sort size
+   */
+  private void validateSortSize() {
+    String sortSizeStr = carbonProperties
+        .getProperty(CarbonCommonConstants.SORT_SIZE, CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL);
+    try {
+      int sortSize = Integer.parseInt(sortSizeStr);
+
+      if (sortSize < CarbonCommonConstants.SORT_SIZE_MIN_VAL) {
+        LOGGER.info("The batch size value \"" + sortSizeStr
+            + "\" is invalid. Using the default value \""
+            + CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL);
+        carbonProperties.setProperty(CarbonCommonConstants.SORT_SIZE,
+            CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL);
+      }
+    } catch (NumberFormatException e) {
+      LOGGER.info("The batch size value \"" + sortSizeStr
+          + "\" is invalid. Using the default value \""
+          + CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL);
+      carbonProperties.setProperty(CarbonCommonConstants.SORT_SIZE,
+          CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL);
+    }
+  }
+
+  private void validateHighCardinalityIdentify() {
+    String highcardIdentifyStr = carbonProperties.getProperty(
+        CarbonCommonConstants.HIGH_CARDINALITY_IDENTIFY_ENABLE,
+        CarbonCommonConstants.HIGH_CARDINALITY_IDENTIFY_ENABLE_DEFAULT);
+    // Boolean.parseBoolean never throws, so the value has to be checked explicitly
+    if (!("true".equalsIgnoreCase(highcardIdentifyStr)
+        || "false".equalsIgnoreCase(highcardIdentifyStr))) {
+      LOGGER.info("The high cardinality identify value \"" + highcardIdentifyStr
+          + "\" is invalid. Using the default value \""
+          + CarbonCommonConstants.HIGH_CARDINALITY_IDENTIFY_ENABLE_DEFAULT + "\"");
+      carbonProperties.setProperty(CarbonCommonConstants.HIGH_CARDINALITY_IDENTIFY_ENABLE,
+          CarbonCommonConstants.HIGH_CARDINALITY_IDENTIFY_ENABLE_DEFAULT);
+    }
+  }
+
+  private void validateHighCardinalityThreshold() {
+    String highcardThresholdStr = carbonProperties.getProperty(
+        CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD,
+        CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD_DEFAULT);
+    try {
+      int highcardThreshold = Integer.parseInt(highcardThresholdStr);
+      if (highcardThreshold < CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD_MIN) {
+        LOGGER.info("The high cardinality threshold value \"" + highcardThresholdStr
+            + "\" is invalid. Using the min value \""
+            + CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD_MIN + "\"");
+        carbonProperties.setProperty(CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD,
+            CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD_MIN + "");
+      }
+    } catch (NumberFormatException e) {
+      LOGGER.info("The high cardinality threshold value \"" + highcardThresholdStr
+          + "\" is invalid. Using the default value \""
+          + CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD_DEFAULT);
+      carbonProperties.setProperty(CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD,
+          CarbonCommonConstants.HIGH_CARDINALITY_THRESHOLD_DEFAULT);
+    }
+  }
+
+  private void validateHighCardinalityInRowCountPercentage() {
+    String highcardPercentageStr = carbonProperties.getProperty(
+        CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE,
+        CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE_DEFAULT);
+    try {
+      double highcardPercentage = Double.parseDouble(highcardPercentageStr);
+      if (highcardPercentage <= 0) {
+        LOGGER.info("The percentage of high cardinality in row count value \""
+            + highcardPercentageStr + "\" is invalid. Using the default value \""
+            + CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE_DEFAULT + "\"");
+        carbonProperties.setProperty(
+            CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE,
+            CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE_DEFAULT);
+      }
+    } catch (NumberFormatException e) {
+      LOGGER.info("The percentage of high cardinality in row count value \""
+          + highcardPercentageStr + "\" is invalid. Using the default value \""
+          + CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE_DEFAULT);
+      carbonProperties.setProperty(CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE,
+          CarbonCommonConstants.HIGH_CARDINALITY_IN_ROW_COUNT_PERCENTAGE_DEFAULT);
+    }
+  }
+
+  /**
+   * This method will read all the properties from the properties file and load
+   * them into memory
+   */
+  private void loadProperties() {
+    String property = System.getProperty("carbon.properties.filepath");
+    if (null == property) {
+      property = CarbonCommonConstants.CARBON_PROPERTIES_FILE_PATH;
+    }
+    File file = new File(property);
+    LOGGER.info("Property file path: " + file.getAbsolutePath());
+
+    FileInputStream fis = null;
+    try {
+      if (file.exists()) {
+        fis = new FileInputStream(file);
+
+        carbonProperties.load(fis);
+      }
+    } catch (FileNotFoundException e) {
+      LOGGER.error("The file: " + CarbonCommonConstants.CARBON_PROPERTIES_FILE_PATH
+          + " does not exist");
+    } catch (IOException e) {
+      LOGGER.error("Error while reading the file: "
+          + CarbonCommonConstants.CARBON_PROPERTIES_FILE_PATH);
+    } finally {
+      if (null != fis) {
+        try {
+          fis.close();
+        } catch (IOException e) {
+          LOGGER.error("Error while closing the file stream for file: "
+                  + CarbonCommonConstants.CARBON_PROPERTIES_FILE_PATH);
+        }
+      }
+    }
+
+    print();
+  }
+
+  /**
+   * This method will be used to get the properties value
+   *
+   * @param key
+   * @return properties value
+   */
+  public String getProperty(String key) {
+    //TODO temporary fix
+    if ("carbon.leaf.node.size".equals(key)) {
+      return "120000";
+    }
+    return carbonProperties.getProperty(key);
+  }
+
+  /**
+   * This method will be used to get the properties value; if the property is
+   * not present then it will return the default value
+   *
+   * @param key
+   * @return properties value
+   */
+  public String getProperty(String key, String defaultValue) {
+    String value = getProperty(key);
+    if (null == value) {
+      return defaultValue;
+    }
+    return value;
+  }
+
+  /**
+   * This method will be used to add a new property
+   *
+   * @param key   property key
+   * @param value property value
+   */
+  public void addProperty(String key, String value) {
+    carbonProperties.setProperty(key, value);
+  }
+
+  /**
+   * Validate the restrictions
+   *
+   * @param actual     configured value
+   * @param max        maximum allowed value
+   * @param min        minimum allowed value
+   * @param defaultVal default value
+   * @return actual if it lies within [min, max], otherwise defaultVal
+   */
+  public long validate(long actual, long max, long min, long defaultVal) {
+    if (actual <= max && actual >= min) {
+      return actual;
+    }
+    return defaultVal;
+  }
+
+  /**
+   * returns the major compaction size value from carbon properties, or the
+   * default value if the configured one is not valid
+   *
+   * @return major compaction size
+   */
+  public long getMajorCompactionSize() {
+    long compactionSize;
+    try {
+      compactionSize = Long.parseLong(getProperty(CarbonCommonConstants.MAJOR_COMPACTION_SIZE,
+          CarbonCommonConstants.DEFAULT_MAJOR_COMPACTION_SIZE));
+    } catch (NumberFormatException e) {
+      compactionSize = Long.parseLong(CarbonCommonConstants.DEFAULT_MAJOR_COMPACTION_SIZE);
+    }
+    return compactionSize;
+  }
+
+  /**
+   * returns the number of loads to be preserved.
+   *
+   * @return number of segments to be preserved
+   */
+  public int getNumberOfSegmentsToBePreserved() {
+    int numberOfSegmentsToBePreserved;
+    try {
+      numberOfSegmentsToBePreserved = Integer.parseInt(
+          getProperty(CarbonCommonConstants.PRESERVE_LATEST_SEGMENTS_NUMBER,
+              CarbonCommonConstants.DEFAULT_PRESERVE_LATEST_SEGMENTS_NUMBER));
+      // checking min and max: 0 is the min and 100 is the max.
+      if (numberOfSegmentsToBePreserved < 0 || numberOfSegmentsToBePreserved > 100) {
+        LOGGER.error("The specified value for property "
+            + CarbonCommonConstants.PRESERVE_LATEST_SEGMENTS_NUMBER + " is incorrect."
+            + " Correct value should be in range of 0 -100. Taking the default value.");
+        numberOfSegmentsToBePreserved =
+            Integer.parseInt(CarbonCommonConstants.DEFAULT_PRESERVE_LATEST_SEGMENTS_NUMBER);
+      }
+    } catch (NumberFormatException e) {
+      numberOfSegmentsToBePreserved =
+          Integer.parseInt(CarbonCommonConstants.DEFAULT_PRESERVE_LATEST_SEGMENTS_NUMBER);
+    }
+    return numberOfSegmentsToBePreserved;
+  }
+
+  public void print() {
+    LOGGER.info("------Using Carbon.properties --------");
+    LOGGER.info(carbonProperties.toString());
+  }
+
+  /**
+   * getting the segment-count thresholds used to pick unmerged segments for compaction.
+   * @return threshold for each compaction level
+   */
+  public int[] getCompactionSegmentLevelCount() {
+    String commaSeparatedLevels;
+
+    commaSeparatedLevels = getProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD,
+        CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD);
+    int[] compactionSize = getIntArray(commaSeparatedLevels);
+
+    if (null == compactionSize) {
+      compactionSize = getIntArray(CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD);
+    }
+
+    return compactionSize;
+  }
+
+  /**
+   * parses a comma separated list of level thresholds into an int array.
+   *
+   * @param commaSeparatedLevels e.g. "4,3"
+   * @return parsed thresholds, or null if any entry is invalid or out of range
+   */
+  private int[] getIntArray(String commaSeparatedLevels) {
+    String[] levels = commaSeparatedLevels.split(",");
+    int[] compactionSize = new int[levels.length];
+    int i = 0;
+    for (String levelSize : levels) {
+      try {
+        int size = Integer.parseInt(levelSize.trim());
+        if (validate(size, 100, 0, -1) < 0) {
+          // if any given size is out of bounds, take the default value for all levels.
+          return null;
+        }
+        compactionSize[i++] = size;
+      } catch (NumberFormatException e) {
+        LOGGER.error(
+            "Given value for property " + CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD
+                + " is not proper. Taking the default value "
+                + CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD);
+        return null;
+      }
+    }
+    return compactionSize;
+  }
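To make the threshold format concrete, the expected behavior of getIntArray under the validation above (a behavioral sketch, not additional code):

    // "4,3"   -> {4, 3}: four segments trigger level 1, three compacted segments level 2
    // "4,300" -> null:   300 is outside [0, 100], so the default applies to all levels
    // "4,x"   -> null:   non-numeric entry; the error is logged and the default applies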
+
+  /**
+   * Validate the restrictions
+   *
+   * @param actual     configured value
+   * @param max        maximum allowed value
+   * @param min        minimum allowed value
+   * @param defaultVal default value
+   * @return actual if it lies within [min, max], otherwise defaultVal
+   */
+  public int validate(int actual, int max, int min, int defaultVal) {
+    if (actual <= max && actual >= min) {
+      return actual;
+    }
+    return defaultVal;
+  }
+
+}
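A short usage sketch of the singleton above. The literal property keys stand in for CarbonCommonConstants.SORT_SIZE and are assumptions for illustration:

    CarbonProperties props = CarbonProperties.getInstance();
    props.addProperty("carbon.sort.size", "500000");             // runtime override
    String sortSize = props.getProperty("carbon.sort.size", "100000");
    int[] levels = props.getCompactionSegmentLevelCount();       // e.g. {4, 3}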

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/CarbonTimeStatisticsFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonTimeStatisticsFactory.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonTimeStatisticsFactory.java
new file mode 100644
index 0000000..c7c2b8a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonTimeStatisticsFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.util;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+public class CarbonTimeStatisticsFactory {
+  private static String loadStatisticsInstanceType;
+  private static LoadStatistics loadStatisticsInstance;
+
+  static {
+    CarbonTimeStatisticsFactory.updateTimeStatisticsUtilStatus();
+    loadStatisticsInstance = genLoadStatisticsInstance();
+  }
+
+  private static void updateTimeStatisticsUtilStatus() {
+    loadStatisticsInstanceType = CarbonProperties.getInstance()
+        .getProperty(CarbonCommonConstants.ENABLE_DATA_LOADING_STATISTICS,
+            CarbonCommonConstants.ENABLE_DATA_LOADING_STATISTICS_DEFAULT);
+  }
+
+  private static LoadStatistics genLoadStatisticsInstance() {
+    // only an explicit "true" enables the real collector; "false" and any
+    // other value fall back to the dummy implementation
+    switch (loadStatisticsInstanceType.toLowerCase()) {
+      case "true":
+        return CarbonLoadStatisticsImpl.getInstance();
+      case "false":
+      default:
+        return CarbonLoadStatisticsDummy.getInstance();
+    }
+  }
+
+  public static LoadStatistics getLoadStatisticsInstance() {
+    return loadStatisticsInstance;
+  }
+
+}
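Because the instance is chosen in a static initializer, the statistics switch must be set before this class is first loaded; a hedged sketch where the literal key stands in for CarbonCommonConstants.ENABLE_DATA_LOADING_STATISTICS:

    // set before CarbonTimeStatisticsFactory is touched for the first time
    CarbonProperties.getInstance()
        .addProperty("enable.data.loading.statistics", "true");  // key name is an assumption
    LoadStatistics stats = CarbonTimeStatisticsFactory.getLoadStatisticsInstance();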



[06/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/logical/AndExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/logical/AndExpression.java b/core/src/main/java/org/carbondata/scan/expression/logical/AndExpression.java
deleted file mode 100644
index b06a232..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/logical/AndExpression.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.logical;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class AndExpression extends BinaryLogicalExpression {
-
-  private static final long serialVersionUID = 1L;
-
-  public AndExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException {
-    ExpressionResult resultLeft = left.evaluate(value);
-    ExpressionResult resultRight = right.evaluate(value);
-    switch (resultLeft.getDataType()) {
-      case BOOLEAN:
-        resultLeft.set(DataType.BOOLEAN, (resultLeft.getBoolean() && resultRight.getBoolean()));
-        break;
-      default:
-        throw new FilterUnsupportedException(
-            "Incompatible datatype for applying AND Expression Filter");
-    }
-    return resultLeft;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    // TODO Auto-generated method stub
-    return ExpressionType.AND;
-  }
-
-  @Override public String getString() {
-    // TODO Auto-generated method stub
-    return "And(" + left.getString() + ',' + right.getString() + ')';
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/logical/BinaryLogicalExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/logical/BinaryLogicalExpression.java b/core/src/main/java/org/carbondata/scan/expression/logical/BinaryLogicalExpression.java
deleted file mode 100644
index 63cef25..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/logical/BinaryLogicalExpression.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.logical;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.scan.expression.BinaryExpression;
-import org.carbondata.scan.expression.ColumnExpression;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.LiteralExpression;
-
-public abstract class BinaryLogicalExpression extends BinaryExpression {
-
-  /**
-   *
-   */
-  private static final long serialVersionUID = 1L;
-
-  public BinaryLogicalExpression(Expression left, Expression right) {
-    super(left, right);
-    // TODO Auto-generated constructor stub
-  }
-
-  public List<ExpressionResult> getLiterals() {
-    List<ExpressionResult> listOfExp =
-        new ArrayList<ExpressionResult>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    getExpressionResultList(this, listOfExp);
-    Collections.sort(listOfExp);
-    return listOfExp;
-  }
-
-  // Will get the column informations involved in the expressions by
-  // traversing the tree
-  public List<ColumnExpression> getColumnList() {
-    // TODO
-    List<ColumnExpression> listOfExp =
-        new ArrayList<ColumnExpression>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    getColumnList(this, listOfExp);
-    return listOfExp;
-  }
-
-  private void getColumnList(Expression expression, List<ColumnExpression> lst) {
-    if (expression instanceof ColumnExpression) {
-      ColumnExpression colExp = (ColumnExpression) expression;
-      boolean found = false;
-
-      for (ColumnExpression currentColExp : lst) {
-        if (currentColExp.getColumnName().equals(colExp.getColumnName())) {
-          found = true;
-          colExp.setColIndex(currentColExp.getColIndex());
-          break;
-        }
-      }
-      if (!found) {
-        colExp.setColIndex(lst.size());
-        lst.add(colExp);
-      }
-    }
-    for (Expression child : expression.getChildren()) {
-      getColumnList(child, lst);
-    }
-  }
-
-  public boolean isSingleDimension() {
-    List<ColumnExpression> listOfExp =
-        new ArrayList<ColumnExpression>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    getColumnList(this, listOfExp);
-    if (listOfExp.size() == 1 && listOfExp.get(0).isDimension()) {
-      return true;
-    }
-    return false;
-
-  }
-
-  private void getExpressionResultList(Expression binaryConditionalExpression,
-      List<ExpressionResult> listOfExp) {
-    if (binaryConditionalExpression instanceof LiteralExpression) {
-      ExpressionResult colExp =
-          ((LiteralExpression) binaryConditionalExpression).getExpressionResult();
-      listOfExp.add(colExp);
-    }
-    for (Expression child : binaryConditionalExpression.getChildren()) {
-      getExpressionResultList(child, listOfExp);
-    }
-
-  }
-
-  /**
-   * the method will return flag (true or false) depending on the existence of the
-   * direct dictionary columns in conditional expression
-   *
-   * @return the method will return flag (true or false)
-   */
-  public boolean isDirectDictionaryColumns() {
-    List<ColumnExpression> listOfExp =
-        new ArrayList<ColumnExpression>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    getColumnList(this, listOfExp);
-    for (ColumnExpression ce : listOfExp) {
-      if (!ce.getCarbonColumn().hasEncoding(Encoding.DICTIONARY)) {
-        return true;
-      }
-    }
-    return false;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/logical/NotExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/logical/NotExpression.java b/core/src/main/java/org/carbondata/scan/expression/logical/NotExpression.java
deleted file mode 100644
index 0d0128a..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/logical/NotExpression.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.logical;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.UnaryExpression;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class NotExpression extends UnaryExpression {
-  private static final long serialVersionUID = 1L;
-
-  public NotExpression(Expression child) {
-    super(child);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterIllegalMemberException, FilterUnsupportedException {
-    ExpressionResult expResult = child.evaluate(value);
-    expResult.set(DataType.BOOLEAN, !(expResult.getBoolean()));
-    switch (expResult.getDataType()) {
-      case BOOLEAN:
-        expResult.set(DataType.BOOLEAN, !(expResult.getBoolean()));
-        break;
-      default:
-        throw new FilterUnsupportedException(
-            "Incompatible datatype for applying NOT Expression Filter");
-    }
-    return expResult;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.NOT;
-  }
-
-  @Override public String getString() {
-    return "Not(" + child.getString() + ')';
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/logical/OrExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/logical/OrExpression.java b/core/src/main/java/org/carbondata/scan/expression/logical/OrExpression.java
deleted file mode 100644
index 69f699c..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/logical/OrExpression.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression.logical;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class OrExpression extends BinaryLogicalExpression {
-
-  private static final long serialVersionUID = 4220598043176438380L;
-
-  public OrExpression(Expression left, Expression right) {
-    super(left, right);
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value)
-      throws FilterIllegalMemberException, FilterUnsupportedException {
-    ExpressionResult resultLeft = left.evaluate(value);
-    ExpressionResult resultRight = right.evaluate(value);
-    switch (resultLeft.getDataType()) {
-      case BOOLEAN:
-        resultLeft.set(DataType.BOOLEAN, (resultLeft.getBoolean() || resultRight.getBoolean()));
-        break;
-      default:
-        throw new FilterUnsupportedException(
-            "Incompatible datatype for applying OR Expression Filter");
-    }
-
-    return resultLeft;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    return ExpressionType.OR;
-  }
-
-  @Override public String getString() {
-    return "Or(" + left.getString() + ',' + right.getString() + ')';
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/DimColumnFilterInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/DimColumnFilterInfo.java b/core/src/main/java/org/carbondata/scan/filter/DimColumnFilterInfo.java
deleted file mode 100644
index e3fd3aa..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/DimColumnFilterInfo.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.filter;
-
-import java.io.Serializable;
-import java.util.List;
-
-public class DimColumnFilterInfo implements Serializable {
-
-  private static final long serialVersionUID = 8181578747306832771L;
-
-  private boolean isIncludeFilter;
-
-  private List<Integer> filterList;
-
-  /**
-   * maintain the no dictionary filter values list.
-   */
-  private List<byte[]> noDictionaryFilterValuesList;
-
-  public List<byte[]> getNoDictionaryFilterValuesList() {
-    return noDictionaryFilterValuesList;
-  }
-
-  public boolean isIncludeFilter() {
-    return isIncludeFilter;
-  }
-
-  public void setIncludeFilter(boolean isIncludeFilter) {
-    this.isIncludeFilter = isIncludeFilter;
-  }
-
-  public List<Integer> getFilterList() {
-    return filterList;
-  }
-
-  public void setFilterList(List<Integer> filterList) {
-    this.filterList = filterList;
-  }
-
-  public void setFilterListForNoDictionaryCols(List<byte[]> noDictionaryFilterValuesList) {
-    this.noDictionaryFilterValuesList = noDictionaryFilterValuesList;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/FilterExpressionProcessor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/FilterExpressionProcessor.java b/core/src/main/java/org/carbondata/scan/filter/FilterExpressionProcessor.java
deleted file mode 100644
index 6543af6..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/FilterExpressionProcessor.java
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter;
-
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.DataRefNode;
-import org.carbondata.core.carbon.datastore.DataRefNodeFinder;
-import org.carbondata.core.carbon.datastore.IndexKey;
-import org.carbondata.core.carbon.datastore.block.AbstractIndex;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.datastore.impl.btree.BTreeDataRefNodeFinder;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.keygenerator.KeyGenException;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.expression.BinaryExpression;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.conditional.BinaryConditionalExpression;
-import org.carbondata.scan.expression.conditional.ConditionalExpression;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.executer.FilterExecuter;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.resolver.ConditionalFilterResolverImpl;
-import org.carbondata.scan.filter.resolver.FilterResolverIntf;
-import org.carbondata.scan.filter.resolver.LogicalFilterResolverImpl;
-import org.carbondata.scan.filter.resolver.RowLevelFilterResolverImpl;
-import org.carbondata.scan.filter.resolver.RowLevelRangeFilterResolverImpl;
-
-public class FilterExpressionProcessor implements FilterProcessor {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(FilterExpressionProcessor.class.getName());
-
-  /**
-   * Provides the resolved form of the filter based on the filter
-   * expression tree that is passed in as an Expression instance.
-   *
-   * @param expressionTree  filter expression tree
-   * @param tableIdentifier contains the carbon store information
-   * @return a filter resolver tree
-   * @throws QueryExecutionException
-   * @throws FilterUnsupportedException
-   */
-  public FilterResolverIntf getFilterResolver(Expression expressionTree,
-      AbsoluteTableIdentifier tableIdentifier) throws FilterUnsupportedException {
-    if (null != expressionTree && null != tableIdentifier) {
-      return getFilterResolvertree(expressionTree, tableIdentifier);
-    }
-    return null;
-  }
-
-  /**
-   * This API scans all the btrees at segment level and selects the required
-   * block reference nodes in order to push them to the executor for applying
-   * filters on the respective data reference nodes.
-   * The following algorithm is used:
-   * Step 1: Get the start and end key based on the filter tree resolver information.
-   * Step 2: Prepare the IndexKeys in order to scan the tree and get the start and end
-   * reference nodes (blocks).
-   * Step 3: Once the data reference node range is retrieved, traverse the nodes within
-   * the range and select each node based on the block min and max values and the filter value.
-   * Step 4: The selected blocks are sent to the executors for applying the filters with
-   * the help of the filter executers.
-   *
-   * @throws QueryExecutionException
-   */
-  public List<DataRefNode> getFilterredBlocks(DataRefNode btreeNode,
-      FilterResolverIntf filterResolver, AbstractIndex tableSegment,
-      AbsoluteTableIdentifier tableIdentifier) throws QueryExecutionException {
-    // Need to get the current dimension tables
-    List<DataRefNode> listOfDataBlocksToScan = new ArrayList<DataRefNode>();
-    // getting the start and end index key based on filter for hitting the
-    // selected block reference nodes based on filter resolver tree.
-    LOGGER.debug("preparing the start and end key for finding"
-        + "start and end block as per filter resolver");
-    List<IndexKey> listOfStartEndKeys = new ArrayList<IndexKey>(2);
-    FilterUtil.traverseResolverTreeAndGetStartAndEndKey(tableSegment.getSegmentProperties(),
-        tableIdentifier, filterResolver, listOfStartEndKeys);
-    // reading the first value from list which has start key
-    IndexKey searchStartKey = listOfStartEndKeys.get(0);
-    // reading the last value from list which has end key
-    IndexKey searchEndKey = listOfStartEndKeys.get(1);
-    if (null == searchStartKey && null == searchEndKey) {
-      try {
-        // TODO need to handle for no dictionary dimensions
-        searchStartKey =
-            FilterUtil.prepareDefaultStartIndexKey(tableSegment.getSegmentProperties());
-        // TODO need to handle for no dictionary dimensions
-        searchEndKey = FilterUtil.prepareDefaultEndIndexKey(tableSegment.getSegmentProperties());
-      } catch (KeyGenException e) {
-        return listOfDataBlocksToScan;
-      }
-    }
-
-    LOGGER.debug(
-        "Successfully retrieved the start and end key. Dictionary Start Key: " + searchStartKey
-            .getDictionaryKeys() + ", No Dictionary Start Key: " + searchStartKey
-            .getNoDictionaryKeys() + ", Dictionary End Key: " + searchEndKey.getDictionaryKeys()
-            + ", No Dictionary End Key: " + searchEndKey.getNoDictionaryKeys());
-    long startTimeInMillis = System.currentTimeMillis();
-    DataRefNodeFinder blockFinder = new BTreeDataRefNodeFinder(
-        tableSegment.getSegmentProperties().getEachDimColumnValueSize());
-    DataRefNode startBlock = blockFinder.findFirstDataBlock(btreeNode, searchStartKey);
-    DataRefNode endBlock = blockFinder.findLastDataBlock(btreeNode, searchEndKey);
-    FilterExecuter filterExecuter =
-        FilterUtil.getFilterExecuterTree(filterResolver, tableSegment.getSegmentProperties(), null);
-    while (startBlock != endBlock) {
-      addBlockBasedOnMinMaxValue(filterExecuter, listOfDataBlocksToScan, startBlock,
-          tableSegment.getSegmentProperties());
-      startBlock = startBlock.getNextDataRefNode();
-    }
-    addBlockBasedOnMinMaxValue(filterExecuter, listOfDataBlocksToScan, endBlock,
-        tableSegment.getSegmentProperties());
-    LOGGER.info("Total Time in retrieving the data reference node" + "after scanning the btree " + (
-        System.currentTimeMillis() - startTimeInMillis)
-        + " Total number of data reference node for executing filter(s) " + listOfDataBlocksToScan
-        .size());
-
-    return listOfDataBlocksToScan;
-  }
-
-  /**
-   * Selects the blocks based on the column max and min values.
-   *
-   * @param filterExecuter
-   * @param listOfDataBlocksToScan
-   * @param dataRefNode
-   * @param segmentProperties
-   */
-  private void addBlockBasedOnMinMaxValue(FilterExecuter filterExecuter,
-      List<DataRefNode> listOfDataBlocksToScan, DataRefNode dataRefNode,
-      SegmentProperties segmentProperties) {
-
-    BitSet bitSet = filterExecuter
-        .isScanRequired(dataRefNode.getColumnsMaxValue(), dataRefNode.getColumnsMinValue());
-    if (!bitSet.isEmpty()) {
-      listOfDataBlocksToScan.add(dataRefNode);
-
-    }
-  }
-
-  /**
-   * Returns a filter resolver instance which will be used by the
-   * executors to evaluate or execute the filters.
-   *
-   * @param expressionTree filter expression tree from which the resolver
-   *                       tree is built.
-   * @return FilterResolverIntf type.
-   * @throws QueryExecutionException
-   * @throws FilterUnsupportedException
-   */
-  private FilterResolverIntf getFilterResolvertree(Expression expressionTree,
-      AbsoluteTableIdentifier tableIdentifier) throws FilterUnsupportedException {
-    FilterResolverIntf filterEvaluatorTree =
-        createFilterResolverTree(expressionTree, tableIdentifier, null);
-    traverseAndResolveTree(filterEvaluatorTree, tableIdentifier);
-    return filterEvaluatorTree;
-  }
-
-  /**
-   * Resolves the filter tree constructed from the filter expression.
-   * This method visits each node of the filter resolver tree and prepares
-   * the surrogates of the filter members which are involved in the filter
-   * expression.
-   *
-   * @param filterResolverTree
-   * @param tableIdentifier
-   * @throws FilterUnsupportedException
-   * @throws QueryExecutionException
-   */
-  private void traverseAndResolveTree(FilterResolverIntf filterResolverTree,
-      AbsoluteTableIdentifier tableIdentifier) throws FilterUnsupportedException {
-    if (null == filterResolverTree) {
-      return;
-    }
-    traverseAndResolveTree(filterResolverTree.getLeft(), tableIdentifier);
-
-    filterResolverTree.resolve(tableIdentifier);
-
-    traverseAndResolveTree(filterResolverTree.getRight(), tableIdentifier);
-  }
-
-  /**
-   * Pattern used: Visitor pattern.
-   * Creates the filter resolver tree based on the filter expression tree;
-   * the resolvers are created according to the type of each expression instance.
-   *
-   * @param expressionTree
-   * @param tableIdentifier
-   * @return the root of the filter resolver tree
-   */
-  private FilterResolverIntf createFilterResolverTree(Expression expressionTree,
-      AbsoluteTableIdentifier tableIdentifier, Expression intermediateExpression) {
-    ExpressionType filterExpressionType = expressionTree.getFilterExpressionType();
-    BinaryExpression currentExpression = null;
-    switch (filterExpressionType) {
-      case OR:
-        currentExpression = (BinaryExpression) expressionTree;
-        return new LogicalFilterResolverImpl(
-            createFilterResolverTree(currentExpression.getLeft(), tableIdentifier,
-                currentExpression),
-            createFilterResolverTree(currentExpression.getRight(), tableIdentifier,
-                currentExpression), currentExpression);
-      case AND:
-        currentExpression = (BinaryExpression) expressionTree;
-        return new LogicalFilterResolverImpl(
-            createFilterResolverTree(currentExpression.getLeft(), tableIdentifier,
-                currentExpression),
-            createFilterResolverTree(currentExpression.getRight(), tableIdentifier,
-                currentExpression), currentExpression);
-      case EQUALS:
-      case IN:
-        return getFilterResolverBasedOnExpressionType(ExpressionType.EQUALS, false, expressionTree,
-            tableIdentifier, expressionTree);
-      case GREATERTHAN:
-      case GREATERTHAN_EQUALTO:
-      case LESSTHAN:
-      case LESSTHAN_EQUALTO:
-        return getFilterResolverBasedOnExpressionType(ExpressionType.EQUALS, true, expressionTree,
-            tableIdentifier, expressionTree);
-
-      case NOT_EQUALS:
-      case NOT_IN:
-        return getFilterResolverBasedOnExpressionType(ExpressionType.NOT_EQUALS, false,
-            expressionTree, tableIdentifier, expressionTree);
-
-      default:
-        return getFilterResolverBasedOnExpressionType(ExpressionType.UNKNOWN, false, expressionTree,
-            tableIdentifier, expressionTree);
-    }
-  }
-
-  /**
-   * Factory method which returns the resolver instance based on the filter
-   * expression type.
-   */
-  private FilterResolverIntf getFilterResolverBasedOnExpressionType(
-      ExpressionType filterExpressionType, boolean isExpressionResolve, Expression expression,
-      AbsoluteTableIdentifier tableIdentifier, Expression expressionTree) {
-    BinaryConditionalExpression currentCondExpression = null;
-    ConditionalExpression condExpression = null;
-    switch (filterExpressionType) {
-      case EQUALS:
-        currentCondExpression = (BinaryConditionalExpression) expression;
-        if (currentCondExpression.isSingleDimension()
-            && currentCondExpression.getColumnList().get(0).getCarbonColumn().getDataType()
-            != DataType.ARRAY
-            && currentCondExpression.getColumnList().get(0).getCarbonColumn().getDataType()
-            != DataType.STRUCT) {
-          // getting new dim index.
-          if (!currentCondExpression.getColumnList().get(0).getCarbonColumn()
-              .hasEncoding(Encoding.DICTIONARY) || currentCondExpression.getColumnList().get(0)
-              .getCarbonColumn().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
-            if (FilterUtil.checkIfExpressionContainsColumn(currentCondExpression.getLeft())
-                && FilterUtil.checkIfExpressionContainsColumn(currentCondExpression.getRight()) || (
-                FilterUtil.checkIfRightExpressionRequireEvaluation(currentCondExpression.getRight())
-                    || FilterUtil
-                    .checkIfLeftExpressionRequireEvaluation(currentCondExpression.getLeft()))) {
-              return new RowLevelFilterResolverImpl(expression, isExpressionResolve, true,
-                  tableIdentifier);
-            }
-            if (currentCondExpression.getFilterExpressionType() == ExpressionType.GREATERTHAN
-                || currentCondExpression.getFilterExpressionType() == ExpressionType.LESSTHAN
-                || currentCondExpression.getFilterExpressionType()
-                == ExpressionType.GREATERTHAN_EQUALTO
-                || currentCondExpression.getFilterExpressionType()
-                == ExpressionType.LESSTHAN_EQUALTO) {
-              return new RowLevelRangeFilterResolverImpl(expression, isExpressionResolve, true,
-                  tableIdentifier);
-            }
-          }
-          return new ConditionalFilterResolverImpl(expression, isExpressionResolve, true);
-
-        }
-        break;
-      case NOT_EQUALS:
-        currentCondExpression = (BinaryConditionalExpression) expression;
-        if (currentCondExpression.isSingleDimension()
-            && currentCondExpression.getColumnList().get(0).getCarbonColumn().getDataType()
-            != DataType.ARRAY
-            && currentCondExpression.getColumnList().get(0).getCarbonColumn().getDataType()
-            != DataType.STRUCT) {
-          if (!currentCondExpression.getColumnList().get(0).getCarbonColumn()
-              .hasEncoding(Encoding.DICTIONARY) || currentCondExpression.getColumnList().get(0)
-              .getCarbonColumn().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
-            if (FilterUtil.checkIfExpressionContainsColumn(currentCondExpression.getLeft())
-                && FilterUtil.checkIfExpressionContainsColumn(currentCondExpression.getRight()) || (
-                FilterUtil.checkIfRightExpressionRequireEvaluation(currentCondExpression.getRight())
-                    || FilterUtil
-                    .checkIfLeftExpressionRequireEvaluation(currentCondExpression.getLeft()))) {
-              return new RowLevelFilterResolverImpl(expression, isExpressionResolve, false,
-                  tableIdentifier);
-            }
-            if (expressionTree.getFilterExpressionType() == ExpressionType.GREATERTHAN
-                || expressionTree.getFilterExpressionType() == ExpressionType.LESSTHAN
-                || expressionTree.getFilterExpressionType() == ExpressionType.GREATERTHAN_EQUALTO
-                || expressionTree.getFilterExpressionType() == ExpressionType.LESSTHAN_EQUALTO) {
-
-              return new RowLevelRangeFilterResolverImpl(expression, isExpressionResolve, false,
-                  tableIdentifier);
-            }
-
-            return new ConditionalFilterResolverImpl(expression, isExpressionResolve, false);
-          }
-          return new ConditionalFilterResolverImpl(expression, isExpressionResolve, false);
-        }
-        break;
-      default:
-        condExpression = (ConditionalExpression) expression;
-        if (condExpression.isSingleDimension()
-            && condExpression.getColumnList().get(0).getCarbonColumn().getDataType()
-            != DataType.ARRAY
-            && condExpression.getColumnList().get(0).getCarbonColumn().getDataType()
-            != DataType.STRUCT) {
-          condExpression = (ConditionalExpression) expression;
-          if (condExpression.getColumnList().get(0).getCarbonColumn()
-              .hasEncoding(Encoding.DICTIONARY) && !condExpression.getColumnList().get(0)
-              .getCarbonColumn().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
-            return new ConditionalFilterResolverImpl(expression, true, true);
-          } else {
-            return new RowLevelFilterResolverImpl(expression, false, false, tableIdentifier);
-          }
-        } else {
-          return new RowLevelFilterResolverImpl(expression, false, false, tableIdentifier);
-        }
-    }
-    return new RowLevelFilterResolverImpl(expression, false, false, tableIdentifier);
-  }
-
-}
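
The pruning decision in addBlockBasedOnMinMaxValue above is the heart of Step 3: a
block is kept only when the filter executer's isScanRequired returns a non-empty
BitSet for the block's column min/max values. A minimal standalone sketch of that
decision, using hypothetical integer min/max values rather than CarbonData's byte[]
metadata, would look like:

    import java.util.BitSet;

    public class MinMaxPruneSketch {
      // Returns true when a block whose column range is [min, max] may
      // contain the filter value and therefore must be scanned.
      static boolean isScanRequired(int min, int max, int filterValue) {
        BitSet bitSet = new BitSet(1);
        if (filterValue >= min && filterValue <= max) {
          bitSet.set(0); // at least one set bit => the block survives pruning
        }
        return !bitSet.isEmpty();
      }

      public static void main(String[] args) {
        System.out.println(isScanRequired(10, 50, 42)); // true: block is scanned
        System.out.println(isScanRequired(10, 50, 99)); // false: block is pruned
      }
    }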

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/FilterProcessor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/FilterProcessor.java b/core/src/main/java/org/carbondata/scan/filter/FilterProcessor.java
deleted file mode 100644
index 3531621..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/FilterProcessor.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.filter;
-
-import java.util.List;
-
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.DataRefNode;
-import org.carbondata.core.carbon.datastore.block.AbstractIndex;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.resolver.FilterResolverIntf;
-
-public interface FilterProcessor {
-
-  /**
-   * Provides the resolved form of the filter based on the filter
-   * expression tree that is passed in.
-   *
-   * @param expressionTree  filter expression tree
-   * @param tableIdentifier contains the carbon store information.
-   * @return the resolved filter tree
-   * @throws QueryExecutionException
-   * @throws FilterUnsupportedException
-   */
-  FilterResolverIntf getFilterResolver(Expression expressionTree,
-      AbsoluteTableIdentifier tableIdentifier) throws FilterUnsupportedException;
-
-  /**
-   * This API is exposed in order to get the required block reference nodes
-   * based on the filter. The block list is sent to the executor tasks in order
-   * to apply the filters.
-   *
-   * @param filterResolver resolved filter tree used to prune the data blocks
-   * @return list of DataRefNode.
-   * @throws QueryExecutionException
-   */
-  List<DataRefNode> getFilterredBlocks(DataRefNode dataRefNode, FilterResolverIntf filterResolver,
-      AbstractIndex segmentIndexBuilder, AbsoluteTableIdentifier tableIdentifier)
-      throws QueryExecutionException;
-
-}
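
Taken together, the two methods of this interface give the usual call sequence:
resolve the filter expression once per query, then prune each segment's blocks.
A sketch of that sequence against the pre-rename org.carbondata packages shown
above (the expression, identifier, btreeRoot and segmentIndex parameters are
placeholders supplied by query planning, not values from this commit):

    import java.util.List;

    import org.carbondata.core.carbon.AbsoluteTableIdentifier;
    import org.carbondata.core.carbon.datastore.DataRefNode;
    import org.carbondata.core.carbon.datastore.block.AbstractIndex;
    import org.carbondata.scan.executor.exception.QueryExecutionException;
    import org.carbondata.scan.expression.Expression;
    import org.carbondata.scan.expression.exception.FilterUnsupportedException;
    import org.carbondata.scan.filter.FilterExpressionProcessor;
    import org.carbondata.scan.filter.FilterProcessor;
    import org.carbondata.scan.filter.resolver.FilterResolverIntf;

    public class FilterProcessorUsageSketch {
      static List<DataRefNode> pruneBlocks(Expression expression,
          AbsoluteTableIdentifier identifier, DataRefNode btreeRoot,
          AbstractIndex segmentIndex)
          throws FilterUnsupportedException, QueryExecutionException {
        FilterProcessor processor = new FilterExpressionProcessor();
        // Resolve the filter expression once per query ...
        FilterResolverIntf resolver = processor.getFilterResolver(expression, identifier);
        // ... then prune the segment's blocks using block-level min/max metadata.
        return processor.getFilterredBlocks(btreeRoot, resolver, segmentIndex, identifier);
      }
    }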


[42/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreInfo.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreInfo.java
new file mode 100644
index 0000000..8f0621e
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreInfo.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.columnar;
+
+import org.apache.carbondata.core.keygenerator.mdkey.NumberCompressor;
+
+public class ColumnarKeyStoreInfo {
+  private int numberOfKeys;
+
+  private int[] sizeOfEachBlock;
+
+  private int[] keyBlockLengths;
+
+  private long[] keyBlockOffsets;
+
+  private int[] keyBlockIndexLength;
+
+  private long[] keyBlockIndexOffsets;
+
+  private String filePath;
+
+  private boolean[] isSorted;
+
+  private int[] cardinality;
+
+  private NumberCompressor numberCompressor;
+
+  private NumberCompressor[] keyBlockUnCompressor;
+
+  private ColumnGroupModel hybridStoreModel;
+
+  /**
+   * dataIndexMap
+   */
+  private int[] dataIndexMapLength;
+
+  /**
+   * dataIndexMap
+   */
+  private long[] dataIndexMapOffsets;
+
+  /**
+   * aggKeyBlock
+   */
+  private boolean[] aggKeyBlock;
+
+  /**
+   * @return the numberOfKeys
+   */
+  public int getNumberOfKeys() {
+    return numberOfKeys;
+  }
+
+  /**
+   * @param numberOfKeys the numberOfKeys to set
+   */
+  public void setNumberOfKeys(int numberOfKeys) {
+    this.numberOfKeys = numberOfKeys;
+  }
+
+  /**
+   * @return the sizeOfEachBlock
+   */
+  public int[] getSizeOfEachBlock() {
+    return sizeOfEachBlock;
+  }
+
+  /**
+   * @param sizeOfEachBlock the sizeOfEachBlock to set
+   */
+  public void setSizeOfEachBlock(int[] sizeOfEachBlock) {
+    this.sizeOfEachBlock = sizeOfEachBlock;
+  }
+
+  /**
+   * @return the keyBlockLengths
+   */
+  public int[] getKeyBlockLengths() {
+    return keyBlockLengths;
+  }
+
+  /**
+   * @param keyBlockLengths the keyBlockLengths to set
+   */
+  public void setKeyBlockLengths(int[] keyBlockLengths) {
+    this.keyBlockLengths = keyBlockLengths;
+  }
+
+  /**
+   * @return the keyBlockOffsets
+   */
+  public long[] getKeyBlockOffsets() {
+    return keyBlockOffsets;
+  }
+
+  /**
+   * @param keyBlockOffsets the keyBlockOffsets to set
+   */
+  public void setKeyBlockOffsets(long[] keyBlockOffsets) {
+    this.keyBlockOffsets = keyBlockOffsets;
+  }
+
+  /**
+   * @return the keyBlockIndexLength
+   */
+  public int[] getKeyBlockIndexLength() {
+    return keyBlockIndexLength;
+  }
+
+  /**
+   * @param keyBlockIndexLength the keyBlockIndexLength to set
+   */
+  public void setKeyBlockIndexLength(int[] keyBlockIndexLength) {
+    this.keyBlockIndexLength = keyBlockIndexLength;
+  }
+
+  /**
+   * @return the keyBlockIndexOffsets
+   */
+  public long[] getKeyBlockIndexOffsets() {
+    return keyBlockIndexOffsets;
+  }
+
+  /**
+   * @param keyBlockIndexOffsets the keyBlockIndexOffsets to set
+   */
+  public void setKeyBlockIndexOffsets(long[] keyBlockIndexOffsets) {
+    this.keyBlockIndexOffsets = keyBlockIndexOffsets;
+  }
+
+  /**
+   * @return the filePath
+   */
+  public String getFilePath() {
+    return filePath;
+  }
+
+  /**
+   * @param filePath the filePath to set
+   */
+  public void setFilePath(String filePath) {
+    this.filePath = filePath;
+  }
+
+  /**
+   * @return the isSorted
+   */
+  public boolean[] getIsSorted() {
+    return isSorted;
+  }
+
+  /**
+   * @param isSorted the isSorted to set
+   */
+  public void setIsSorted(boolean[] isSorted) {
+    this.isSorted = isSorted;
+  }
+
+  /**
+   * @return the numberCompressor
+   */
+  public NumberCompressor getNumberCompressor() {
+    return numberCompressor;
+  }
+
+  /**
+   * @param numberCompressor the numberCompressor to set
+   */
+  public void setNumberCompressor(NumberCompressor numberCompressor) {
+    this.numberCompressor = numberCompressor;
+  }
+
+  /**
+   * @return the dataIndexMapLength
+   */
+  public int[] getDataIndexMapLength() {
+    return dataIndexMapLength;
+  }
+
+  /**
+   * @param dataIndexMapLength the dataIndexMapLength to set
+   */
+  public void setDataIndexMapLength(int[] dataIndexMapLength) {
+    this.dataIndexMapLength = dataIndexMapLength;
+  }
+
+  /**
+   * @return the dataIndexMapOffsets
+   */
+  public long[] getDataIndexMapOffsets() {
+    return dataIndexMapOffsets;
+  }
+
+  /**
+   * @param dataIndexMapOffsets the dataIndexMapOffsets to set
+   */
+  public void setDataIndexMapOffsets(long[] dataIndexMapOffsets) {
+    this.dataIndexMapOffsets = dataIndexMapOffsets;
+  }
+
+  /**
+   * @return the aggKeyBlock
+   */
+  public boolean[] getAggKeyBlock() {
+    return aggKeyBlock;
+  }
+
+  /**
+   * @param aggKeyBlock the aggKeyBlock to set
+   */
+  public void setAggKeyBlock(boolean[] aggKeyBlock) {
+    this.aggKeyBlock = aggKeyBlock;
+  }
+
+  /**
+   * @return the keyBlockUnCompressor
+   */
+  public NumberCompressor[] getKeyBlockUnCompressor() {
+    return keyBlockUnCompressor;
+  }
+
+  /**
+   * @param keyBlockUnCompressor the keyBlockUnCompressor to set
+   */
+  public void setKeyBlockUnCompressor(NumberCompressor[] keyBlockUnCompressor) {
+    this.keyBlockUnCompressor = keyBlockUnCompressor;
+  }
+
+  public int[] getCardinality() {
+    return cardinality;
+  }
+
+  public void setCardinality(int[] cardinality) {
+    this.cardinality = cardinality;
+  }
+
+  public ColumnGroupModel getHybridStoreModel() {
+    return hybridStoreModel;
+  }
+
+  public void setHybridStoreModel(ColumnGroupModel hybridStoreModel) {
+    this.hybridStoreModel = hybridStoreModel;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreMetadata.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreMetadata.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreMetadata.java
new file mode 100644
index 0000000..a9dcb35
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/ColumnarKeyStoreMetadata.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.columnar;
+
+import org.apache.carbondata.core.keygenerator.KeyGenerator;
+import org.apache.carbondata.core.keygenerator.factory.KeyGeneratorFactory;
+
+public class ColumnarKeyStoreMetadata {
+  private boolean isSorted;
+
+  private int[] columnIndex;
+
+  private int[] columnReverseIndex;
+
+  private int eachRowSize;
+
+  private int[] dataIndex;
+
+  private boolean isUnCompressed;
+
+  private KeyGenerator keyGenerator;
+
+  /**
+   * isNoDictionaryValColumn.
+   */
+  private boolean isNoDictionaryValColumn;
+  private boolean isRowStore;
+
+  public ColumnarKeyStoreMetadata(int eachRowSize) {
+    this.eachRowSize = eachRowSize;
+    keyGenerator = KeyGeneratorFactory.getKeyGenerator(new int[] { eachRowSize });
+  }
+
+  /**
+   * @return the isSorted
+   */
+  public boolean isSorted() {
+    return isSorted;
+  }
+
+  /**
+   * @param isSorted the isSorted to set
+   */
+  public void setSorted(boolean isSorted) {
+    this.isSorted = isSorted;
+  }
+
+  /**
+   * @return the columnIndex
+   */
+  public int[] getColumnIndex() {
+    return columnIndex;
+  }
+
+  /**
+   * @param columnIndex the columnIndex to set
+   */
+  public void setColumnIndex(int[] columnIndex) {
+    this.columnIndex = columnIndex;
+  }
+
+  /**
+   * @return the eachRowSize
+   */
+  public int getEachRowSize() {
+    return eachRowSize;
+  }
+
+  /**
+   * @return the dataIndex
+   */
+  public int[] getDataIndex() {
+    return dataIndex;
+  }
+
+  /**
+   * @param dataIndex the dataIndex to set
+   */
+  public void setDataIndex(int[] dataIndex) {
+    this.dataIndex = dataIndex;
+  }
+
+  /**
+   * @return the columnReverseIndex
+   */
+  public int[] getColumnReverseIndex() {
+    return columnReverseIndex;
+  }
+
+  /**
+   * @param columnReverseIndex the columnReverseIndex to set
+   */
+  public void setColumnReverseIndex(int[] columnReverseIndex) {
+    this.columnReverseIndex = columnReverseIndex;
+  }
+
+  public boolean isUnCompressed() {
+    return isUnCompressed;
+  }
+
+  public void setUnCompressed(boolean isUnCompressed) {
+    this.isUnCompressed = isUnCompressed;
+  }
+
+  public KeyGenerator getKeyGenerator() {
+    return keyGenerator;
+  }
+
+  public boolean isRowStore() {
+    return isRowStore;
+  }
+
+  public void setRowStore(boolean isRowStore) {
+    this.isRowStore = isRowStore;
+  }
+
+  /**
+   * @return
+   */
+  public boolean isNoDictionaryValColumn() {
+    return isNoDictionaryValColumn;
+
+  }
+
+  /**
+   * @param isNoDictionaryValColumn
+   */
+  public void setNoDictionaryValColumn(boolean isNoDictionaryValColumn) {
+    this.isNoDictionaryValColumn = isNoDictionaryValColumn;
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/IndexStorage.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/IndexStorage.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/IndexStorage.java
new file mode 100644
index 0000000..e1f4548
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/IndexStorage.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.columnar;
+
+public interface IndexStorage<T> {
+  boolean isAlreadySorted();
+
+  T getDataAfterComp();
+
+  T getIndexMap();
+
+  byte[][] getKeyBlock();
+
+  T getDataIndexMap();
+
+  int getTotalSize();
+
+  /**
+   * @return min value of block
+   */
+  byte[] getMin();
+
+  /**
+   * @return max value of block
+   */
+  byte[] getMax();
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/UnBlockIndexer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/UnBlockIndexer.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/UnBlockIndexer.java
new file mode 100644
index 0000000..149facb
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/columnar/UnBlockIndexer.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.columnar;
+
+import java.util.Arrays;
+
+public final class UnBlockIndexer {
+
+  private UnBlockIndexer() {
+
+  }
+
+  public static int[] uncompressIndex(int[] indexData, int[] indexMap) {
+    int actualSize = indexData.length;
+    for (int i = 0; i < indexMap.length; i++) {
+      actualSize += indexData[indexMap[i] + 1] - indexData[indexMap[i]] - 1;
+    }
+    int[] indexes = new int[actualSize];
+    int k = 0;
+    for (int i = 0; i < indexData.length; i++) {
+      int index = Arrays.binarySearch(indexMap, i);
+      if (index > -1) {
+        for (int j = indexData[indexMap[index]]; j <= indexData[indexMap[index] + 1]; j++) {
+          indexes[k] = j;
+          k++;
+        }
+        i++;
+      } else {
+        indexes[k] = indexData[i];
+        k++;
+      }
+    }
+    return indexes;
+  }
+
+  public static byte[] uncompressData(byte[] data, int[] index, int keyLen) {
+    if (index.length < 1) {
+      return data;
+    }
+    int numberOfCopy = 0;
+    int actualSize = 0;
+    int srcPos = 0;
+    int destPos = 0;
+    for (int i = 1; i < index.length; i += 2) {
+      actualSize += index[i];
+    }
+    byte[] uncompressedData = new byte[actualSize * keyLen];
+    int picIndex = 0;
+    for (int i = 0; i < data.length; i += keyLen) {
+      numberOfCopy = index[picIndex * 2 + 1];
+      picIndex++;
+      for (int j = 0; j < numberOfCopy; j++) {
+        System.arraycopy(data, srcPos, uncompressedData, destPos, keyLen);
+        destPos += keyLen;
+      }
+      srcPos += keyLen;
+    }
+    return uncompressedData;
+  }
+
+}
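
The pair of methods above implements a simple run-length expansion: the index
array carries (entry, repeat-count) pairs at even/odd positions, and
uncompressData copies each keyLen-sized key from data as many times as its
count says (only the odd positions are read by this method). A small usage
sketch with arbitrary sample values, assuming the class above is on the
classpath:

    import java.util.Arrays;

    import org.apache.carbondata.core.datastorage.store.columnar.UnBlockIndexer;

    public class UnBlockIndexerDemo {
      public static void main(String[] args) {
        // Three one-byte keys; the odd positions of index say how often
        // each key repeats: key 1 twice, key 2 once, key 3 three times.
        byte[] data = { 1, 2, 3 };
        int[] index = { 0, 2, 1, 1, 2, 3 };
        byte[] expanded = UnBlockIndexer.uncompressData(data, index, 1);
        System.out.println(Arrays.toString(expanded)); // [1, 1, 2, 3, 3, 3]
      }
    }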

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/Compressor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/Compressor.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/Compressor.java
new file mode 100644
index 0000000..67d4cc1
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/Compressor.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression;
+
+public interface Compressor<T> {
+
+  byte[] compress(T input);
+
+  T unCompress(byte[] input);
+
+}
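
The contract is symmetric: unCompress(compress(x)) should reproduce x for the
array type T a codec handles. A minimal, purely illustrative implementation of
the interface (a hypothetical no-op codec, not part of this commit; the real
codecs follow in SnappyCompression below):

    import org.apache.carbondata.core.datastorage.store.compression.Compressor;

    // No-op codec: "compresses" byte[] input by returning it unchanged.
    public class IdentityCompressor implements Compressor<byte[]> {

      public byte[] compress(byte[] input) {
        return input; // no actual compression performed
      }

      public byte[] unCompress(byte[] input) {
        return input;
      }
    }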

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/MeasureMetaDataModel.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/MeasureMetaDataModel.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/MeasureMetaDataModel.java
new file mode 100644
index 0000000..7d212bb
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/MeasureMetaDataModel.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression;
+
+public class MeasureMetaDataModel {
+  /**
+   * maxValue
+   */
+  private Object[] maxValue;
+
+  /**
+   * minValue
+   */
+  private Object[] minValue;
+
+  /**
+   * decimal
+   */
+  private int[] decimal;
+
+  /**
+   * measureCount
+   */
+  private int measureCount;
+
+  /**
+   * uniqueValue
+   */
+  private Object[] uniqueValue;
+
+  /**
+   * type
+   */
+  private char[] type;
+
+  /**
+   * dataTypeSelected
+   */
+  private byte[] dataTypeSelected;
+
+  private Object[] minValueFactForAgg;
+
+  public MeasureMetaDataModel() {
+
+  }
+
+  /**
+   * MeasureMetaDataModel Constructor
+   *
+   * @param minValue
+   * @param maxValue
+   * @param decimal
+   * @param measureCount
+   * @param uniqueValue
+   * @param type
+   */
+  public MeasureMetaDataModel(Object[] minValue, Object[] maxValue, int[] decimal, int measureCount,
+      Object[] uniqueValue, char[] type, byte[] dataTypeSelected) {
+    this.minValue = minValue;
+    this.maxValue = maxValue;
+    this.decimal = decimal;
+    this.measureCount = measureCount;
+    this.uniqueValue = uniqueValue;
+    this.type = type;
+    this.dataTypeSelected = dataTypeSelected;
+  }
+
+  /**
+   * get Max value
+   *
+   * @return
+   */
+  public Object[] getMaxValue() {
+    return maxValue;
+  }
+
+  /**
+   * set max value
+   *
+   * @param maxValue
+   */
+  public void setMaxValue(Object[] maxValue) {
+    this.maxValue = maxValue;
+  }
+
+  /**
+   * getMinValue
+   *
+   * @return
+   */
+  public Object[] getMinValue() {
+    return minValue;
+  }
+
+  /**
+   * setMinValue
+   *
+   * @param minValue
+   */
+  public void setMinValue(Object[] minValue) {
+    this.minValue = minValue;
+  }
+
+  /**
+   * getDecimal
+   *
+   * @return
+   */
+  public int[] getDecimal() {
+    return decimal;
+  }
+
+  /**
+   * setDecimal
+   *
+   * @param decimal
+   */
+  public void setDecimal(int[] decimal) {
+    this.decimal = decimal;
+  }
+
+  /**
+   * getMeasureCount
+   *
+   * @return
+   */
+  public int getMeasureCount() {
+    return measureCount;
+  }
+
+  /**
+   * setMeasureCount
+   *
+   * @param measureCount
+   */
+  public void setMeasureCount(int measureCount) {
+    this.measureCount = measureCount;
+  }
+
+  /**
+   * getUniqueValue
+   *
+   * @return
+   */
+  public Object[] getUniqueValue() {
+    return uniqueValue;
+  }
+
+  /**
+   * setUniqueValue
+   *
+   * @param uniqueValue
+   */
+  public void setUniqueValue(Object[] uniqueValue) {
+    this.uniqueValue = uniqueValue;
+  }
+
+  /**
+   * @return the type
+   */
+  public char[] getType() {
+    return type;
+  }
+
+  /**
+   * @param type the type to set
+   */
+  public void setType(char[] type) {
+    this.type = type;
+  }
+
+  /**
+   * @return the dataTypeSelected
+   */
+  public byte[] getDataTypeSelected() {
+    return dataTypeSelected;
+  }
+
+  /**
+   * @param dataTypeSelected the dataTypeSelected to set
+   */
+  public void setDataTypeSelected(byte[] dataTypeSelected) {
+    this.dataTypeSelected = dataTypeSelected;
+  }
+
+  /**
+   * @return the minValueFactForAgg
+   */
+  public Object[] getMinValueFactForAgg() {
+    return minValueFactForAgg;
+  }
+
+  /**
+   * @param minValueFactForAgg the minValueFactForAgg to set
+   */
+  public void setMinValueFactForAgg(Object[] minValueFactForAgg) {
+    this.minValueFactForAgg = minValueFactForAgg;
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/SnappyCompression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/SnappyCompression.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/SnappyCompression.java
new file mode 100644
index 0000000..31717a9
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/SnappyCompression.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression;
+
+import java.io.IOException;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+
+import org.xerial.snappy.Snappy;
+
+public class SnappyCompression {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(SnappyCompression.class.getName());
+
+  /**
+   * SnappyByteCompression.
+   */
+  public static enum SnappyByteCompression implements Compressor<byte[]> {
+    /**
+     *
+     */
+    INSTANCE;
+
+    /**
+     * wrapper method for compressing byte[] unCompInput.
+     */
+    public byte[] compress(byte[] unCompInput) {
+      try {
+        return Snappy.rawCompress(unCompInput, unCompInput.length);
+      } catch (IOException e) {
+        LOGGER.error(e, e.getMessage());
+        return null;
+      }
+    }
+
+    /**
+     * wrapper method for unCompress byte[] compInput.
+     *
+     * @return byte[].
+     */
+    public byte[] unCompress(byte[] compInput) {
+      try {
+        return Snappy.uncompress(compInput);
+      } catch (IOException e) {
+        LOGGER.error(e, e.getMessage());
+      }
+      return compInput;
+    }
+  }
+
+  /**
+   * enum class for SnappyDoubleCompression.
+   */
+  public static enum SnappyDoubleCompression implements Compressor<double[]> {
+    /**
+     *
+     */
+    INSTANCE;
+
+    /**
+     * wrapper method for compressing double[] unCompInput.
+     */
+    public byte[] compress(double[] unCompInput) {
+      try {
+        return Snappy.compress(unCompInput);
+      } catch (IOException e) {
+        LOGGER.error(e, e.getMessage());
+        return null;
+      }
+    }
+
+    /**
+     * wrapper method for unCompress byte[] compInput.
+     *
+     * @param compInput byte[].
+     * @return double[].
+     */
+    public double[] unCompress(byte[] compInput) {
+      try {
+        return Snappy.uncompressDoubleArray(compInput);
+      } catch (IOException e) {
+        LOGGER.error(e, e.getMessage());
+      }
+      return null;
+    }
+
+  }
+
+  /**
+   * enum class for SnappyShortCompression.
+   *
+   * @author S71955
+   */
+  public static enum SnappyShortCompression implements Compressor<short[]> {
+    /**
+     *
+     */
+    INSTANCE;
+
+    /**
+     * wrapper method for compress short[] unCompInput.
+     *
+     * @param unCompInput short[].
+     * @return byte[].
+     */
+    public byte[] compress(short[] unCompInput) {
+      try {
+        return Snappy.compress(unCompInput);
+      } catch (IOException e) {
+        LOGGER.error(e, e.getMessage());
+        return null;
+      }
+    }
+
+    /**
+     * wrapper method for uncompressShortArray.
+     *
+     * @param compInput byte[].
+     * @return short[].
+     */
+    public short[] unCompress(byte[] compInput) {
+      try {
+        return Snappy.uncompressShortArray(compInput);
+      } catch (IOException e) {
+        LOGGER.error(e, e.getMessage());
+      }
+      return null;
+    }
+  }
+
+  /**
+   * enum class for SnappyIntCompression.
+   */
+  public static enum SnappyIntCompression implements Compressor<int[]> {
+    /**
+     *
+     */
+    INSTANCE;
+
+    /**
+     * wrapper method for compress int[] unCompInput.
+     *
+     * @param unCompInput int[].
+     * @return byte[].
+     */
+    public byte[] compress(int[] unCompInput) {
+      try {
+        return Snappy.compress(unCompInput);
+      } catch (IOException e) {
+        LOGGER.error(e, e.getMessage());
+        return null;
+      }
+    }
+
+    /**
+     * wrapper method for uncompressIntArray.
+     *
+     * @param compInput byte[].
+     * @return int[].
+     */
+    public int[] unCompress(byte[] compInput) {
+      try {
+        return Snappy.uncompressIntArray(compInput);
+      } catch (IOException e) {
+        LOGGER.error(e, e.getMessage());
+      }
+      return null;
+    }
+  }
+
+  /**
+   * enum class for SnappyLongCompression.
+   */
+  public static enum SnappyLongCompression implements Compressor<long[]> {
+    /**
+     *
+     */
+    INSTANCE;
+
+    /**
+     * wrapper method for compress long[] unCompInput.
+     *
+     * @param unCompInput long[].
+     * @return byte[].
+     */
+    public byte[] compress(long[] unCompInput) {
+      try {
+        return Snappy.compress(unCompInput);
+      } catch (IOException e) {
+        LOGGER.error(e, e.getMessage());
+        return null;
+      }
+    }
+
+    /**
+     * wrapper method for uncompressLongArray.
+     *
+     * @param compInput byte[].
+     * @return long[].
+     */
+    public long[] unCompress(byte[] compInput) {
+      try {
+        return Snappy.uncompressLongArray(compInput);
+      } catch (IOException e) {
+        LOGGER.error(e, e.getMessage());
+      }
+      return null;
+    }
+  }
+
+  /**
+   * enum class for SnappyFloatCompression.
+   */
+
+  public static enum SnappyFloatCompression implements Compressor<float[]> {
+    /**
+     *
+     */
+    INSTANCE;
+
+    /**
+     * wrapper method for compress float[] unCompInput.
+     *
+     * @param unCompInput float[].
+     * @return byte[].
+     */
+    public byte[] compress(float[] unCompInput) {
+      try {
+        return Snappy.compress(unCompInput);
+      } catch (IOException e) {
+        LOGGER.error(e, e.getMessage());
+        return null;
+      }
+    }
+
+    /**
+     * wrapper method for uncompressFloatArray.
+     *
+     * @param compInput byte[].
+     * @return float[].
+     */
+    public float[] unCompress(byte[] compInput) {
+      try {
+        return Snappy.uncompressFloatArray(compInput);
+      } catch (IOException e) {
+        LOGGER.error(e, e.getMessage());
+      }
+      return null;
+    }
+  }
+
+}
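
A short round-trip sketch of the byte[] codec above (the payload string is
arbitrary; compress returns null only if Snappy throws an IOException):

    import java.util.Arrays;

    import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression.SnappyByteCompression;

    public class SnappyRoundTrip {
      public static void main(String[] args) {
        byte[] raw = "carbondata snappy round trip".getBytes();
        byte[] packed = SnappyByteCompression.INSTANCE.compress(raw);
        byte[] restored = SnappyByteCompression.INSTANCE.unCompress(packed);
        // The round trip must reproduce the original bytes.
        System.out.println(Arrays.equals(raw, restored)); // true
      }
    }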

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/ValueCompressionModel.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/ValueCompressionModel.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/ValueCompressionModel.java
new file mode 100644
index 0000000..94cbf19
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/ValueCompressionModel.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression;
+
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public class ValueCompressionModel {
+  /**
+   * COMPRESSION_TYPE[] variable.
+   */
+  private ValueCompressionUtil.COMPRESSION_TYPE[] compType;
+
+  /**
+   * DataType[]  variable.
+   */
+  private ValueCompressionUtil.DataType[] changedDataType;
+  /**
+   * DataType[]  variable.
+   */
+  private ValueCompressionUtil.DataType[] actualDataType;
+
+  /**
+   * maxValue
+   */
+  private Object[] maxValue;
+  /**
+   * minValue.
+   */
+  private Object[] minValue;
+
+  private Object[] minValueFactForAgg;
+
+  /**
+   * uniqueValue
+   */
+  private Object[] uniqueValue;
+  /**
+   * decimal.
+   */
+  private int[] decimal;
+
+  /**
+   * aggType
+   */
+  private char[] type;
+
+  /**
+   * dataTypeSelected
+   */
+  private byte[] dataTypeSelected;
+  /**
+   * unCompressValues.
+   */
+  private ValueCompressonHolder.UnCompressValue[] unCompressValues;
+
+  /**
+   * @return the compType
+   */
+  public ValueCompressionUtil.COMPRESSION_TYPE[] getCompType() {
+    return compType;
+  }
+
+  /**
+   * @param compType the compType to set
+   */
+  public void setCompType(ValueCompressionUtil.COMPRESSION_TYPE[] compType) {
+    this.compType = compType;
+  }
+
+  /**
+   * @return the changedDataType
+   */
+  public ValueCompressionUtil.DataType[] getChangedDataType() {
+    return changedDataType;
+  }
+
+  /**
+   * @param changedDataType the changedDataType to set
+   */
+  public void setChangedDataType(ValueCompressionUtil.DataType[] changedDataType) {
+    this.changedDataType = changedDataType;
+  }
+
+  /**
+   * @return the actualDataType
+   */
+  public ValueCompressionUtil.DataType[] getActualDataType() {
+    return actualDataType;
+  }
+
+  /**
+   * @param actualDataType the actualDataType to set
+   */
+  public void setActualDataType(ValueCompressionUtil.DataType[] actualDataType) {
+    this.actualDataType = actualDataType;
+  }
+
+  /**
+   * @return the maxValue
+   */
+  public Object[] getMaxValue() {
+    return maxValue;
+  }
+
+  /**
+   * @param maxValue the maxValue to set
+   */
+  public void setMaxValue(Object[] maxValue) {
+    this.maxValue = maxValue;
+  }
+
+  /**
+   * @return the decimal
+   */
+  public int[] getDecimal() {
+    return decimal;
+  }
+
+  /**
+   * @param decimal the decimal to set
+   */
+  public void setDecimal(int[] decimal) {
+    this.decimal = decimal;
+  }
+
+  /**
+   * @return the unCompressValues
+   */
+  public ValueCompressonHolder.UnCompressValue[] getUnCompressValues() {
+    return unCompressValues;
+  }
+
+  /**
+   * @param unCompressValues the unCompressValues to set
+   */
+  public void setUnCompressValues(ValueCompressonHolder.UnCompressValue[] unCompressValues) {
+    this.unCompressValues = unCompressValues;
+  }
+
+  /**
+   * @return the minValue
+   */
+  public Object[] getMinValue() {
+    return minValue;
+  }
+
+  /**
+   * @param minValue the minValue to set
+   */
+  public void setMinValue(Object[] minValue) {
+    this.minValue = minValue;
+  }
+
+  /**
+   * @return the aggType
+   */
+  public char[] getType() {
+    return type;
+  }
+
+  /**
+   * @param type the type to set
+   */
+  public void setType(char[] type) {
+    this.type = type;
+  }
+
+  /**
+   * @return the dataTypeSelected
+   */
+  public byte[] getDataTypeSelected() {
+    return dataTypeSelected;
+  }
+
+  /**
+   * @param dataTypeSelected the dataTypeSelected to set
+   */
+  public void setDataTypeSelected(byte[] dataTypeSelected) {
+    this.dataTypeSelected = dataTypeSelected;
+  }
+
+  /**
+   * @return the uniqueValue
+   */
+  public Object[] getUniqueValue() {
+    return uniqueValue;
+  }
+
+  /**
+   * @param uniqueValue the uniqueValue to set
+   */
+  public void setUniqueValue(Object[] uniqueValue) {
+    this.uniqueValue = uniqueValue;
+  }
+
+  /**
+   * @return the minValueFactForAgg
+   */
+  public Object[] getMinValueFactForAgg() {
+    return minValueFactForAgg;
+  }
+
+  /**
+   * @param minValueFactForAgg the minValueFactForAgg to set
+   */
+  public void setMinValueFactForAgg(Object[] minValueFactForAgg) {
+    this.minValueFactForAgg = minValueFactForAgg;
+  }
+}
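
A minimal usage sketch of the model above (the values are hypothetical): the
class keeps one slot per measure column, so every array set on it must share
the same length and ordering.

    ValueCompressionModel model = new ValueCompressionModel();
    // two measure columns, so every array below has two entries
    model.setMaxValue(new Object[] { 100.0, 2000L });
    model.setMinValue(new Object[] { 1.0, 5L });
    model.setDecimal(new int[] { 2, 0 });           // scale per measure
    model.setDataTypeSelected(new byte[] { 1, 1 }); // hypothetical selections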

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/ValueCompressonHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/ValueCompressonHolder.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/ValueCompressonHolder.java
new file mode 100644
index 0000000..01764ce
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/ValueCompressonHolder.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression;
+
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+/**
+ * Holder utilities for compressing and uncompressing measure values via
+ * the snappy-backed compressors below.
+ */
+public final class ValueCompressonHolder {
+
+  /**
+   * byteCompressor.
+   */
+  private static Compressor<byte[]> byteCompressor =
+      SnappyCompression.SnappyByteCompression.INSTANCE;
+
+  /**
+   * shortCompressor.
+   */
+  private static Compressor<short[]> shortCompressor =
+      SnappyCompression.SnappyShortCompression.INSTANCE;
+
+  /**
+   * intCompressor.
+   */
+  private static Compressor<int[]> intCompressor = SnappyCompression.SnappyIntCompression.INSTANCE;
+
+  /**
+   * longCompressor.
+   */
+  private static Compressor<long[]> longCompressor =
+      SnappyCompression.SnappyLongCompression.INSTANCE;
+
+  /**
+   * floatCompressor
+   */
+  private static Compressor<float[]> floatCompressor =
+      SnappyCompression.SnappyFloatCompression.INSTANCE;
+  /**
+   * doubleCompressor.
+   */
+  private static Compressor<double[]> doubleCompressor =
+      SnappyCompression.SnappyDoubleCompression.INSTANCE;
+
+  private ValueCompressonHolder() {
+
+  }
+
+  /**
+   * Uncompresses the given bytes and stores the result on the supplied holder.
+   *
+   * @param dataType storage data type of the compressed payload
+   * @param value    holder that receives the uncompressed array
+   * @param data     compressed bytes
+   */
+  public static void unCompress(DataType dataType, UnCompressValue value, byte[] data) {
+    switch (dataType) {
+      case DATA_BYTE:
+        value.setValue(byteCompressor.unCompress(data));
+        break;
+      case DATA_SHORT:
+        value.setValue(shortCompressor.unCompress(data));
+        break;
+      case DATA_INT:
+        value.setValue(intCompressor.unCompress(data));
+        break;
+      case DATA_LONG:
+      case DATA_BIGINT:
+        value.setValue(longCompressor.unCompress(data));
+        break;
+      case DATA_FLOAT:
+        value.setValue(floatCompressor.unCompress(data));
+        break;
+      default:
+        value.setValue(doubleCompressor.unCompress(data));
+        break;
+    }
+  }
+
+  /**
+   * Contract for a holder of values of array type T that can be
+   * compressed to bytes and inflated back.
+   *
+   * @param <T> uncompressed array type (for example byte[] or double[])
+   */
+  public interface UnCompressValue<T> extends Cloneable {
+
+    void setValue(T value);
+
+    void setValueInBytes(byte[] value);
+
+    UnCompressValue<T> getNew();
+
+    UnCompressValue compress();
+
+    UnCompressValue uncompress(DataType dataType);
+
+    byte[] getBackArrayData();
+
+    UnCompressValue getCompressorObject();
+
+    CarbonReadDataHolder getValues(int decimal, Object maxValue);
+
+  }
+
+}
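
A sketch of how this dispatch pairs with the UnCompressValue implementations
added later in this commit (assumes UnCompressMaxMinInt from the files below
and an import of ValueCompressionUtil.DataType):

    // Pack an int[] with snappy, then inflate it back through the switch.
    UnCompressMaxMinInt source = new UnCompressMaxMinInt();
    source.setValue(new int[] { 0, 3, 7 });
    // compress() returns a byte-backed holder wrapping the snappy output
    ValueCompressonHolder.UnCompressValue packed = source.compress();
    // unCompress() routes the bytes through intCompressor and stores the
    // inflated int[] on the target holder
    UnCompressMaxMinInt target = new UnCompressMaxMinInt();
    ValueCompressonHolder.unCompress(DataType.DATA_INT, target,
        packed.getBackArrayData());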

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressByteArray.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressByteArray.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressByteArray.java
new file mode 100644
index 0000000..b5f7887
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressByteArray.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.DataTypeUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public class UnCompressByteArray implements ValueCompressonHolder.UnCompressValue<byte[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressByteArray.class.getName());
+  /**
+   * byteCompressor.
+   */
+  private static Compressor<byte[]> byteCompressor =
+      SnappyCompression.SnappyByteCompression.INSTANCE;
+  private ByteArrayType arrayType;
+  /**
+   * value.
+   */
+  private byte[] value;
+
+  public UnCompressByteArray(ByteArrayType type) {
+    arrayType = (type == ByteArrayType.BYTE_ARRAY)
+        ? ByteArrayType.BYTE_ARRAY : ByteArrayType.BIG_DECIMAL;
+  }
+
+  @Override public void setValue(byte[] value) {
+    this.value = value;
+
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    this.value = value;
+
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue<byte[]> getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressByteArray byte1 = new UnCompressByteArray(arrayType);
+    byte1.setValue(byteCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override
+  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
+    ValueCompressonHolder.UnCompressValue byte1 = new UnCompressByteArray(arrayType);
+    byte1.setValue(byteCompressor.unCompress(value));
+    return byte1;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return this.value;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressByteArray(arrayType);
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    List<byte[]> valsList = new ArrayList<byte[]>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    buffer.rewind();
+    int length = 0;
+    byte[] actualValue = null;
+    //CHECKSTYLE:OFF    Approval No:Approval-367
+    while (buffer.hasRemaining()) {//CHECKSTYLE:ON
+      length = buffer.getInt();
+      actualValue = new byte[length];
+      buffer.get(actualValue);
+      valsList.add(actualValue);
+
+    }
+    CarbonReadDataHolder holder = new CarbonReadDataHolder();
+    byte[][] value = new byte[valsList.size()][];
+    valsList.toArray(value);
+    if (arrayType == ByteArrayType.BIG_DECIMAL) {
+      BigDecimal[] bigDecimalValues = new BigDecimal[value.length];
+      for (int i = 0; i < value.length; i++) {
+        bigDecimalValues[i] = DataTypeUtil.byteToBigDecimal(value[i]);
+      }
+      holder.setReadableBigDecimalValues(bigDecimalValues);
+      return holder;
+    }
+    holder.setReadableByteValues(value);
+    return holder;
+  }
+
+  public enum ByteArrayType {
+    BYTE_ARRAY,
+    BIG_DECIMAL
+  }
+
+}
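
getValues() above assumes a flattened layout in which every row is stored as
a 4-byte length followed by that many payload bytes. A minimal sketch of
building such a buffer (the row contents are hypothetical):

    byte[] row1 = "abc".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    byte[] row2 = "de".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    // [int length][payload] per row, concatenated
    ByteBuffer buffer = ByteBuffer.allocate(2 * 4 + row1.length + row2.length);
    buffer.putInt(row1.length).put(row1);
    buffer.putInt(row2.length).put(row2);

    UnCompressByteArray holder =
        new UnCompressByteArray(UnCompressByteArray.ByteArrayType.BYTE_ARRAY);
    holder.setValue(buffer.array());
    // decimal and maxValue are ignored for plain byte arrays
    CarbonReadDataHolder rows = holder.getValues(0, null); // two byte[] rows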

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressDefaultLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressDefaultLong.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressDefaultLong.java
new file mode 100644
index 0000000..b30932c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressDefaultLong.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+
+public class UnCompressDefaultLong extends UnCompressNoneLong {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressDefaultLong.class.getName());
+
+  public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException clnNotSupportedExc) {
+      LOGGER.error(clnNotSupportedExc,
+          clnNotSupportedExc.getMessage());
+    }
+    return null;
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    long[] vals = new long[value.length];
+    for (int i = 0; i < vals.length; i++) {
+      vals[i] = value[i];
+    }
+    dataHolder.setReadableLongValues(vals);
+    return dataHolder;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByte.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByte.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByte.java
new file mode 100644
index 0000000..e6486c2
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByte.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressMaxMinByte implements UnCompressValue<byte[]> {
+
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressMaxMinByte.class.getName());
+  /**
+   * byteCompressor.
+   */
+  private static Compressor<byte[]> byteCompressor =
+      SnappyCompression.SnappyByteCompression.INSTANCE;
+  /**
+   * value.
+   */
+  protected byte[] value;
+
+  //TODO SIMIAN
+
+  @Override public void setValue(byte[] value) {
+    this.value = value;
+
+  }
+
+  @Override public UnCompressValue getNew() {
+    try {
+      return (UnCompressValue) clone();
+    } catch (CloneNotSupportedException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return null;
+  }
+
+  @Override public UnCompressValue compress() {
+
+    UnCompressMaxMinByte byte1 = new UnCompressMaxMinByte();
+    byte1.setValue(byteCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public UnCompressValue uncompress(DataType dataType) {
+    UnCompressValue byte1 = ValueCompressionUtil.unCompressMaxMin(dataType, dataType);
+    ValueCompressonHolder.unCompress(dataType, byte1, value);
+    return byte1;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return value;
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    this.value = value;
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public UnCompressValue getCompressorObject() {
+    return new UnCompressMaxMinByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double maxValue = (double) maxValueObject;
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      if (value[i] == 0) {
+        vals[i] = maxValue;
+      } else {
+        vals[i] = maxValue - value[i];
+      }
+    }
+    dataHolder.setReadableDoubleValues(vals);
+    return dataHolder;
+  }
+}
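
The decode above is the max-min scheme: each stored byte is the distance of
the value from the column maximum, and a stored 0 is read as "equal to the
maximum". A small worked sketch (hypothetical values):

    UnCompressMaxMinByte holder = new UnCompressMaxMinByte();
    holder.setValue(new byte[] { 0, 1, 42 });
    // with max = 100.0: 0 -> 100.0, 1 -> 99.0, 42 -> 58.0
    CarbonReadDataHolder decoded = holder.getValues(0, Double.valueOf(100.0));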

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByteForLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByteForLong.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByteForLong.java
new file mode 100644
index 0000000..c265a44
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinByteForLong.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public class UnCompressMaxMinByteForLong extends UnCompressMaxMinByte {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressMaxMinByteForLong.class.getName());
+  private static Compressor<byte[]> byteCompressor =
+      SnappyCompression.SnappyByteCompression.INSTANCE;
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+
+    UnCompressMaxMinByteForLong byte1 = new UnCompressMaxMinByteForLong();
+    byte1.setValue(byteCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override
+  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
+    ValueCompressonHolder.UnCompressValue byte1 =
+        ValueCompressionUtil.unCompressMaxMin(dataType, dataType);
+    ValueCompressonHolder.unCompress(dataType, byte1, value);
+    return byte1;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressMaxMinByteForLong();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    long maxValue = (long) maxValueObject;
+    long[] vals = new long[value.length];
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      if (value[i] == 0) {
+        vals[i] = maxValue;
+      } else {
+        vals[i] = maxValue - value[i];
+      }
+    }
+    dataHolder.setReadableLongValues(vals);
+    return dataHolder;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefault.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefault.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefault.java
new file mode 100644
index 0000000..df72c61
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefault.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressMaxMinDefault implements ValueCompressonHolder.UnCompressValue<double[]> {
+
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressMaxMinDefault.class.getName());
+
+  /**
+   * doubleCompressor.
+   */
+  private static Compressor<double[]> doubleCompressor =
+      SnappyCompression.SnappyDoubleCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private double[] value;
+
+  @Override public void setValue(double[] value) {
+    this.value = value;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException ex5) {
+      LOGGER.error(ex5, ex5.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressMaxMinByte byte1 = new UnCompressMaxMinByte();
+    byte1.setValue(doubleCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
+    return null;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToDoubleArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressMaxMinByte();
+  }
+
+  //TODO SIMIAN
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double maxValue = (double) maxValueObject;
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder dataHolderInfoObj = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      if (value[i] == 0) {
+        vals[i] = maxValue;
+      } else {
+        vals[i] = maxValue - value[i];
+      }
+
+    }
+    dataHolderInfoObj.setReadableDoubleValues(vals);
+    return dataHolderInfoObj;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefaultLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefaultLong.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefaultLong.java
new file mode 100644
index 0000000..57a25eb
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinDefaultLong.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public class UnCompressMaxMinDefaultLong extends UnCompressMaxMinLong {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressMaxMinDefaultLong.class.getName());
+  private static Compressor<long[]> longCompressor =
+      SnappyCompression.SnappyLongCompression.INSTANCE;
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException ex5) {
+      LOGGER.error(ex5, ex5.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressMaxMinByteForLong byte1 = new UnCompressMaxMinByteForLong();
+    byte1.setValue(longCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressMaxMinByteForLong();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    long maxValue = (long) maxValueObject;
+    long[] vals = new long[value.length];
+    CarbonReadDataHolder dataHolderInfoObj = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      if (value[i] == 0) {
+        vals[i] = maxValue;
+      } else {
+        vals[i] = maxValue - value[i];
+      }
+
+    }
+    dataHolderInfoObj.setReadableLongValues(vals);
+    return dataHolderInfoObj;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinFloat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinFloat.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinFloat.java
new file mode 100644
index 0000000..396c5c0
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinFloat.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressMaxMinFloat implements UnCompressValue<float[]> {
+
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressMaxMinFloat.class.getName());
+  /**
+   * floatCompressor
+   */
+  private static Compressor<float[]> floatCompressor =
+      SnappyCompression.SnappyFloatCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private float[] value;
+
+  @Override public void setValue(float[] value) {
+    this.value = value;
+  }
+
+  @Override public UnCompressValue getNew() {
+    try {
+      return (UnCompressValue) clone();
+    } catch (CloneNotSupportedException ex4) {
+      LOGGER.error(ex4, ex4.getMessage());
+    }
+    return null;
+  }
+
+  @Override public UnCompressValue compress() {
+
+    UnCompressMaxMinByte byte1 = new UnCompressMaxMinByte();
+    byte1.setValue(floatCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public UnCompressValue uncompress(DataType dTypeVal) {
+    return null;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToFloatArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public UnCompressValue getCompressorObject() {
+    return new UnCompressMaxMinByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double maxValue = (double) maxValueObject;
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder dataHolderVal = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      if (value[i] == 0) {
+        vals[i] = maxValue;
+      } else {
+        vals[i] = maxValue - value[i];
+      }
+
+    }
+    dataHolderVal.setReadableDoubleValues(vals);
+    return dataHolderVal;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinInt.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinInt.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinInt.java
new file mode 100644
index 0000000..a2af61d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinInt.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public class UnCompressMaxMinInt implements ValueCompressonHolder.UnCompressValue<int[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressMaxMinInt.class.getName());
+
+  /**
+   * intCompressor.
+   */
+  private static Compressor<int[]> intCompressor = SnappyCompression.SnappyIntCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private int[] value;
+
+  @Override public void setValue(int[] value) {
+    this.value = value;
+
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressMaxMinByte byte1 = new UnCompressMaxMinByte();
+    byte1.setValue(intCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue uncompress(
+      ValueCompressionUtil.DataType dataTypeValue) {
+    return null;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToIntArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressMaxMinByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decVal, Object maxValueObject) {
+    double maxValue = (double) maxValueObject;
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      if (value[i] == 0) {
+        vals[i] = maxValue;
+      } else {
+        vals[i] = maxValue - value[i];
+      }
+
+    }
+    dataHolder.setReadableDoubleValues(vals);
+    return dataHolder;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinLong.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinLong.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinLong.java
new file mode 100644
index 0000000..cab3aa2
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinLong.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public class UnCompressMaxMinLong implements ValueCompressonHolder.UnCompressValue<long[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressMaxMinLong.class.getName());
+  /**
+   * longCompressor.
+   */
+  private static Compressor<long[]> longCompressor =
+      SnappyCompression.SnappyLongCompression.INSTANCE;
+  /**
+   * value.
+   */
+  protected long[] value;
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressMaxMinByte unCompressByte = new UnCompressMaxMinByte();
+    unCompressByte.setValue(longCompressor.compress(value));
+    return unCompressByte;
+  }
+
+  @Override public void setValue(long[] value) {
+    this.value = value;
+
+  }
+
+  @Override
+  public ValueCompressonHolder.UnCompressValue uncompress(ValueCompressionUtil.DataType dataType) {
+    return null;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToLongArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressMaxMinByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double maxValue = (double) maxValueObject;
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder data = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      if (value[i] == 0) {
+        vals[i] = maxValue;
+      } else {
+        vals[i] = maxValue - value[i];
+      }
+
+    }
+    data.setReadableDoubleValues(vals);
+    return data;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinShort.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinShort.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinShort.java
new file mode 100644
index 0000000..884c430
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressMaxMinShort.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressMaxMinShort implements ValueCompressonHolder.UnCompressValue<short[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressMaxMinShort.class.getName());
+  /**
+   * shortCompressor.
+   */
+  private static Compressor<short[]> shortCompressor =
+      SnappyCompression.SnappyShortCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private short[] value;
+
+  @Override public void setValue(short[] value) {
+    this.value = value;
+
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
+    return null;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return ValueCompressionUtil.convertToBytes(value);
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException ex3) {
+      LOGGER.error(ex3, ex3.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+
+    UnCompressMaxMinByte byte1 = new UnCompressMaxMinByte();
+    byte1.setValue(shortCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    ByteBuffer buffer = ByteBuffer.wrap(value);
+    this.value = ValueCompressionUtil.convertToShortArray(buffer, value.length);
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressMaxMinByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double maxValue = (double) maxValueObject;
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder carbonDataHolderObj = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      if (value[i] == 0) {
+        vals[i] = maxValue;
+      } else {
+        vals[i] = maxValue - value[i];
+      }
+
+    }
+    carbonDataHolderObj.setReadableDoubleValues(vals);
+    return carbonDataHolderObj;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalByte.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalByte.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalByte.java
new file mode 100644
index 0000000..7a81789
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/compression/type/UnCompressNonDecimalByte.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.compression.type;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+import org.apache.carbondata.core.util.ValueCompressionUtil.DataType;
+
+public class UnCompressNonDecimalByte implements ValueCompressonHolder.UnCompressValue<byte[]> {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(UnCompressNonDecimalByte.class.getName());
+  /**
+   * byteCompressor.
+   */
+  private static Compressor<byte[]> byteCompressor =
+      SnappyCompression.SnappyByteCompression.INSTANCE;
+  /**
+   * value.
+   */
+  private byte[] value;
+
+  @Override public void setValue(byte[] value) {
+    this.value = value;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue getNew() {
+    try {
+      return (ValueCompressonHolder.UnCompressValue) clone();
+    } catch (CloneNotSupportedException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return null;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue compress() {
+    UnCompressNonDecimalByte byte1 = new UnCompressNonDecimalByte();
+    byte1.setValue(byteCompressor.compress(value));
+    return byte1;
+  }
+
+  @Override public ValueCompressonHolder.UnCompressValue uncompress(DataType dataType) {
+    ValueCompressonHolder.UnCompressValue byte1 =
+        ValueCompressionUtil.unCompressNonDecimal(dataType, dataType);
+    ValueCompressonHolder.unCompress(dataType, byte1, value);
+    return byte1;
+  }
+
+  @Override public void setValueInBytes(byte[] value) {
+    this.value = value;
+  }
+
+  @Override public byte[] getBackArrayData() {
+    return value;
+  }
+
+  /**
+   * @see ValueCompressonHolder.UnCompressValue#getCompressorObject()
+   */
+  @Override public ValueCompressonHolder.UnCompressValue getCompressorObject() {
+    return new UnCompressNonDecimalByte();
+  }
+
+  @Override public CarbonReadDataHolder getValues(int decimal, Object maxValueObject) {
+    double[] vals = new double[value.length];
+    CarbonReadDataHolder dataHolder = new CarbonReadDataHolder();
+    for (int i = 0; i < vals.length; i++) {
+      vals[i] = value[i] / Math.pow(10, decimal);
+    }
+    dataHolder.setReadableDoubleValues(vals);
+    return dataHolder;
+  }
+}
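
Here each stored byte is the measure value scaled up by 10^decimal, so
getValues() divides the scale back out. A small worked sketch (hypothetical
values):

    UnCompressNonDecimalByte holder = new UnCompressNonDecimalByte();
    holder.setValue(new byte[] { 1, 25, 120 });
    // with decimal = 2: 1 -> 0.01, 25 -> 0.25, 120 -> 1.2
    CarbonReadDataHolder decoded = holder.getValues(2, null); // maxValue unused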


[09/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortInfo.java b/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortInfo.java
deleted file mode 100644
index a1be6cb..0000000
--- a/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortInfo.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.writer.sortindex;
-
-import java.util.List;
-
-/**
- * Model to hold the sortIndex and sortIndexInverted data
- */
-public class CarbonDictionarySortInfo {
-  /**
-   * Sort index after members are sorted
-   */
-  private List<Integer> sortIndex;
-  /**
-   * inverted sort index to get the member
-   */
-  private List<Integer> sortIndexInverted;
-
-  /**
-   * The constructor to instantiate the CarbonDictionarySortInfo object
-   * with sortIndex and sortInverted Index data
-   *
-   * @param sortIndex
-   * @param sortIndexInverted
-   */
-  public CarbonDictionarySortInfo(List<Integer> sortIndex, List<Integer> sortIndexInverted) {
-    this.sortIndex = sortIndex;
-    this.sortIndexInverted = sortIndexInverted;
-  }
-
-  /**
-   * return list of sortIndex
-   *
-   * @return
-   */
-  public List<Integer> getSortIndex() {
-    return sortIndex;
-  }
-
-  /**
-   * returns list of sortindexinverted
-   *
-   * @return
-   */
-  public List<Integer> getSortIndexInverted() {
-    return sortIndexInverted;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortInfoPreparator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortInfoPreparator.java b/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortInfoPreparator.java
deleted file mode 100644
index 595d12c..0000000
--- a/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortInfoPreparator.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.writer.sortindex;
-
-import java.nio.charset.Charset;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-
-import org.carbondata.core.cache.dictionary.Dictionary;
-import org.carbondata.core.cache.dictionary.DictionaryChunksWrapper;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.util.CarbonUtilException;
-
-import org.apache.commons.lang.ArrayUtils;
-
-/**
- * The class prepares the column sort info, i.e. the sortIndex
- * and inverted sortIndex data
- */
-public class CarbonDictionarySortInfoPreparator {
-
-  /**
-   * The method returns the column Sort Info
-   *
-   * @param newDistinctValues new distinct values to be added
-   * @param dictionary        dictionary holding the existing distinct values
-   * @param dataType          data type of the column
-   * @return CarbonDictionarySortInfo returns the column Sort Info
-   * @throws CarbonUtilException
-   */
-  public CarbonDictionarySortInfo getDictionarySortInfo(List<String> newDistinctValues,
-      Dictionary dictionary, DataType dataType) throws CarbonUtilException {
-    CarbonDictionarySortModel[] dictionarySortModels =
-        prepareDictionarySortModels(newDistinctValues, dictionary, dataType);
-    return createColumnSortInfo(dictionarySortModels);
-  }
-
-  /**
-   * The method prepares the sort_index and sort_index_inverted data
-   *
-   * @param dictionarySortModels sort models to be sorted by member value
-   */
-  private CarbonDictionarySortInfo createColumnSortInfo(
-      CarbonDictionarySortModel[] dictionarySortModels) {
-
-    //Sort index after members are sorted
-    int[] sortIndex;
-    //inverted sort index to get the member
-    int[] sortIndexInverted;
-
-    Arrays.sort(dictionarySortModels);
-    sortIndex = new int[dictionarySortModels.length];
-    sortIndexInverted = new int[dictionarySortModels.length];
-
-    for (int i = 0; i < dictionarySortModels.length; i++) {
-      CarbonDictionarySortModel dictionarySortModel = dictionarySortModels[i];
-      sortIndex[i] = dictionarySortModel.getKey();
-      // surrogate keys are 1-based while array indices are 0-based, so -1 maps
-      // the surrogate key to its array slot and +1 converts the 0-based loop
-      // counter back into a 1-based sort position
-      sortIndexInverted[dictionarySortModel.getKey() - 1] = i + 1;
-    }
-    dictionarySortModels = null;
-    List<Integer> sortIndexList = convertToList(sortIndex);
-    List<Integer> sortIndexInvertedList = convertToList(sortIndexInverted);
-    return new CarbonDictionarySortInfo(sortIndexList, sortIndexInvertedList);
-  }
-
-  /**
-   * The method converts the int[] to List<Integer>
-   *
-   * @param data primitive int array to be boxed
-   * @return the values as a list
-   */
-  private List<Integer> convertToList(int[] data) {
-    Integer[] wrapperType = ArrayUtils.toObject(data);
-    return Arrays.asList(wrapperType);
-  }
-
-  /**
-   * The method returns the array of CarbonDictionarySortModel
-   *
-   * @param distinctValues new distinct values
-   * @param dictionary wraps the List<List<byte[]>> dictionary chunks and provides
-   *                   the iterator to retrieve the chunk members
-   * @param dataType   data type of the column
-   * @return CarbonDictionarySortModel[] models, each holding a member's
-   * surrogate key and its value
-   */
-  private CarbonDictionarySortModel[] prepareDictionarySortModels(List<String> distinctValues,
-      Dictionary dictionary, DataType dataType) {
-    CarbonDictionarySortModel[] dictionarySortModels = null;
-    // The wrapper wraps the List<List<byte[]>> dictionary chunks and provides
-    // the iterator to retrieve the chunk members.
-    int surrogate = 1;
-    if (null != dictionary) {
-      DictionaryChunksWrapper dictionaryChunksWrapper = dictionary.getDictionaryChunks();
-      dictionarySortModels =
-          new CarbonDictionarySortModel[dictionaryChunksWrapper.getSize() + distinctValues.size()];
-      while (dictionaryChunksWrapper.hasNext()) {
-        dictionarySortModels[surrogate - 1] =
-            createDictionarySortModel(surrogate, dataType, dictionaryChunksWrapper.next());
-        surrogate++;
-      }
-    } else {
-      dictionarySortModels = new CarbonDictionarySortModel[distinctValues.size()];
-    }
-    // for new distinct values
-    Iterator<String> distinctValue = distinctValues.iterator();
-    while (distinctValue.hasNext()) {
-      dictionarySortModels[surrogate - 1] =
-          createDictionarySortModel(surrogate, dataType, distinctValue.next().getBytes());
-      surrogate++;
-    }
-    return dictionarySortModels;
-  }
-
-  /**
-   * Creates a sort model for one dictionary member.
-   *
-   * @param surrogate surrogate key of the member
-   * @param dataType  data type of the column
-   * @param value     member value in bytes
-   * @return CarbonDictionarySortModel
-   */
-  private CarbonDictionarySortModel createDictionarySortModel(int surrogate, DataType dataType,
-      byte[] value) {
-    String memberValue = new String(value, Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
-    return new CarbonDictionarySortModel(surrogate, dataType, memberValue);
-  }
-}
-
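
A minimal sketch of the sortIndex/sortIndexInverted relationship the preparator
above computes, using plain JDK types rather than Carbon classes (the member
values and surrogate keys are made up for illustration):

    import java.util.Arrays;
    import java.util.Comparator;

    public class SortIndexDemo {
      public static void main(String[] args) {
        // surrogate keys 1..3 are assigned in insertion order
        String[] members = {"cherry", "apple", "banana"};
        Integer[] surrogates = {1, 2, 3};
        // sort surrogates by the member value they point to
        Arrays.sort(surrogates, Comparator.comparing(s -> members[s - 1]));
        int[] sortIndex = new int[members.length];
        int[] sortIndexInverted = new int[members.length];
        for (int i = 0; i < surrogates.length; i++) {
          sortIndex[i] = surrogates[i];                 // i-th smallest member's surrogate
          sortIndexInverted[surrogates[i] - 1] = i + 1; // surrogate -> 1-based sort position
        }
        System.out.println(Arrays.toString(sortIndex));         // [2, 3, 1]
        System.out.println(Arrays.toString(sortIndexInverted)); // [3, 1, 2]
      }
    }

These two lists are exactly what the CarbonDictionarySortInfo model above carries.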

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortModel.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortModel.java b/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortModel.java
deleted file mode 100644
index 8e66cc7..0000000
--- a/core/src/main/java/org/carbondata/core/writer/sortindex/CarbonDictionarySortModel.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.writer.sortindex;
-
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.util.CarbonProperties;
-
-/**
- * Dictionary sort model class holds the member value and its corresponding surrogate key.
- */
-public class CarbonDictionarySortModel implements Comparable<CarbonDictionarySortModel> {
-
-  /**
-   * Surrogate key
-   */
-  private int key;
-
-  /**
-   * member value as string
-   */
-  private String memberValue;
-
-  /**
-   * member dataType
-   */
-  private DataType dataType;
-
-  /**
-   * Constructor to init the dictionary sort model
-   *
-   * @param key         surrogate key of the member
-   * @param dataType    data type of the column
-   * @param memberValue member value
-   */
-  public CarbonDictionarySortModel(int key, DataType dataType, String memberValue) {
-    this.key = key;
-    this.dataType = dataType;
-    this.memberValue = memberValue;
-  }
-
-  /**
-   * Compares the member values according to the column data type
-   */
-  @Override public int compareTo(CarbonDictionarySortModel o) {
-    switch (dataType) {
-      case SHORT:
-      case INT:
-      case LONG:
-      case DOUBLE:
-
-        double d1;
-        double d2;
-        try {
-          d1 = Double.parseDouble(memberValue);
-        } catch (NumberFormatException e) {
-          if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(o.memberValue)) {
-            return -1;
-          }
-          return 1;
-        }
-        try {
-          d2 = Double.parseDouble(o.memberValue);
-        } catch (NumberFormatException e) {
-          return -1;
-        }
-        return Double.compare(d1, d2);
-      case DECIMAL:
-        java.math.BigDecimal val1 = null;
-        java.math.BigDecimal val2 = null;
-        try {
-          val1 = new java.math.BigDecimal(memberValue);
-        } catch (NumberFormatException e) {
-          if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(o.memberValue)) {
-            return -1;
-          }
-          return 1;
-        }
-        try {
-          val2 = new java.math.BigDecimal(o.memberValue);
-        } catch (NumberFormatException e) {
-          return -1;
-        }
-        return val1.compareTo(val2);
-      case TIMESTAMP:
-        SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
-            .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-                CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
-        Date date1 = null;
-        Date date2 = null;
-        try {
-          date1 = parser.parse(memberValue);
-        } catch (ParseException e) {
-          if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(o.memberValue)) {
-            return -1;
-          }
-          return 1;
-        }
-        try {
-          date2 = parser.parse(o.memberValue);
-        } catch (ParseException e) {
-          return -1;
-        }
-        return date1.compareTo(date2);
-      case STRING:
-      default:
-        return this.memberValue.compareTo(o.memberValue);
-    }
-  }
-
-  /**
-   * @see Object#hashCode()
-   */
-  @Override public int hashCode() {
-    return (memberValue == null) ? 0 : memberValue.hashCode();
-  }
-
-  /**
-   * @see Object#equals(Object)
-   */
-  @Override public boolean equals(Object obj) {
-    if (obj instanceof CarbonDictionarySortModel) {
-      if (this == obj) {
-        return true;
-      }
-      CarbonDictionarySortModel other = (CarbonDictionarySortModel) obj;
-      if (memberValue == null) {
-        if (other.memberValue != null) {
-          return false;
-        }
-      } else if (!this.memberValue.equals(other.memberValue)) {
-        return false;
-      }
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  /**
-   * Returns the surrogate key of the member.
-   *
-   * @return the surrogate key
-   */
-  public int getKey() {
-    return key;
-  }
-
-  /**
-   * Returns the member value.
-   *
-   * @return the member value
-   */
-  public String getMemberValue() {
-    return memberValue;
-  }
-
-}
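
A small JDK-only illustration of why compareTo above branches on the data type
instead of always comparing strings (the values are made up):

    public class MemberCompareDemo {
      public static void main(String[] args) {
        String a = "9", b = "10";
        // lexicographic order puts "10" before "9"
        System.out.println(a.compareTo(b) > 0);  // true
        // numeric order, as used for SHORT/INT/LONG/DOUBLE members
        System.out.println(Double.compare(Double.parseDouble(a),
            Double.parseDouble(b)) > 0);         // false
      }
    }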

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/collector/ScannedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/collector/ScannedResultCollector.java b/core/src/main/java/org/carbondata/scan/collector/ScannedResultCollector.java
deleted file mode 100644
index 424bd86..0000000
--- a/core/src/main/java/org/carbondata/scan/collector/ScannedResultCollector.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.collector;
-
-import java.util.List;
-
-import org.carbondata.scan.result.AbstractScannedResult;
-
-/**
- * Interface which will be used to aggregate the scan result
- */
-public interface ScannedResultCollector {
-
-  /**
-   * Below method will be used to collect rows from the scanned result
-   *
-   * @param scannedResult scanned result
-   * @param batchSize     maximum number of rows to collect
-   * @return the collected rows, at most batchSize of them
-   */
-  List<Object[]> collectData(AbstractScannedResult scannedResult, int batchSize);
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/collector/impl/AbstractScannedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/collector/impl/AbstractScannedResultCollector.java b/core/src/main/java/org/carbondata/scan/collector/impl/AbstractScannedResultCollector.java
deleted file mode 100644
index 81d7b6a..0000000
--- a/core/src/main/java/org/carbondata/scan/collector/impl/AbstractScannedResultCollector.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.collector.impl;
-
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.keygenerator.KeyGenException;
-import org.carbondata.core.util.DataTypeUtil;
-import org.carbondata.scan.collector.ScannedResultCollector;
-import org.carbondata.scan.executor.infos.BlockExecutionInfo;
-import org.carbondata.scan.executor.infos.KeyStructureInfo;
-import org.carbondata.scan.executor.util.QueryUtil;
-import org.carbondata.scan.result.AbstractScannedResult;
-import org.carbondata.scan.wrappers.ByteArrayWrapper;
-
-/**
- * Abstract collector holding the common measure-filling and key-restructuring
- * logic shared by the concrete scanned result collectors.
- */
-public abstract class AbstractScannedResultCollector implements ScannedResultCollector {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(AbstractScannedResultCollector.class.getName());
-
-  /**
-   * restructuring info
-   */
-  private KeyStructureInfo restructureInfos;
-
-  /**
-   * table block execution infos
-   */
-  protected BlockExecutionInfo tableBlockExecutionInfos;
-
-  /**
-   * Measure ordinals
-   */
-  protected int[] measuresOrdinal;
-
-  /**
-   * flags whether each measure exists in the current table block; used to
-   * handle the restructuring scenario
-   */
-  protected boolean[] isMeasureExistsInCurrentBlock;
-
-  /**
-   * default values of the measures; in case of restructuring some measures
-   * will not be present in an older block, so their default values are used
-   * when filling the data for those measure columns
-   */
-  private Object[] measureDefaultValue;
-
-  /**
-   * measure datatypes.
-   */
-  protected DataType[] measureDatatypes;
-
-  public AbstractScannedResultCollector(BlockExecutionInfo blockExecutionInfos) {
-    this.tableBlockExecutionInfos = blockExecutionInfos;
-    restructureInfos = blockExecutionInfos.getKeyStructureInfo();
-    measuresOrdinal = tableBlockExecutionInfos.getAggregatorInfo().getMeasureOrdinals();
-    isMeasureExistsInCurrentBlock = tableBlockExecutionInfos.getAggregatorInfo().getMeasureExists();
-    measureDefaultValue = tableBlockExecutionInfos.getAggregatorInfo().getDefaultValues();
-    this.measureDatatypes = tableBlockExecutionInfos.getAggregatorInfo().getMeasureDataTypes();
-  }
-
-  protected void fillMeasureData(Object[] msrValues, int offset,
-      AbstractScannedResult scannedResult) {
-    for (short i = 0; i < measuresOrdinal.length; i++) {
-      // if the measure exists in this block then read it from the measure
-      // column data chunk
-      if (isMeasureExistsInCurrentBlock[i]) {
-        msrValues[i + offset] = getMeasureData(scannedResult.getMeasureChunk(measuresOrdinal[i]),
-            scannedResult.getCurrenrRowId(), measureDatatypes[i]);
-      } else {
-        // if not then get the default value and use that value in aggregation
-        msrValues[i + offset] = measureDefaultValue[i];
-      }
-    }
-  }
-
-  private Object getMeasureData(MeasureColumnDataChunk dataChunk, int index, DataType dataType) {
-    if (!dataChunk.getNullValueIndexHolder().getBitSet().get(index)) {
-      Object msrVal;
-      switch (dataType) {
-        case INT:
-        case LONG:
-          msrVal = dataChunk.getMeasureDataHolder().getReadableLongValueByIndex(index);
-          break;
-        case DECIMAL:
-          msrVal = dataChunk.getMeasureDataHolder().getReadableBigDecimalValueByIndex(index);
-          break;
-        default:
-          msrVal = dataChunk.getMeasureDataHolder().getReadableDoubleValueByIndex(index);
-      }
-      return DataTypeUtil.getMeasureDataBasedOnDataType(msrVal, dataType);
-    }
-    return null;
-  }
-
-  /**
-   * Below method will be used to update the collected result keys if required
-   */
-  protected void updateData(List<Object[]> listBasedResult) {
-    if (tableBlockExecutionInfos.isFixedKeyUpdateRequired()) {
-      updateKeyWithLatestBlockKeygenerator(listBasedResult);
-    }
-  }
-
-  /**
-   * Below method will be used to update the fixed length key with the
-   * latest block key generator
-   */
-  private void updateKeyWithLatestBlockKeygenerator(List<Object[]> listBasedResult) {
-    try {
-      long[] data = null;
-      ByteArrayWrapper key = null;
-      for (int i = 0; i < listBasedResult.size(); i++) {
-        // get the key
-        key = (ByteArrayWrapper)listBasedResult.get(i)[0];
-        // unpack the key with table block key generator
-        data = tableBlockExecutionInfos.getBlockKeyGenerator()
-            .getKeyArray(key.getDictionaryKey(), tableBlockExecutionInfos.getMaskedByteForBlock());
-        // packed the key with latest block key generator
-        // and generate the masked key for that key
-        key.setDictionaryKey(QueryUtil
-            .getMaskedKey(restructureInfos.getKeyGenerator().generateKey(data),
-                restructureInfos.getMaxKey(), restructureInfos.getMaskByteRanges(),
-                restructureInfos.getMaskByteRanges().length));
-      }
-    } catch (KeyGenException e) {
-      LOGGER.error(e);
-    }
-  }
-
-}
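
A sketch of the restructuring fallback fillMeasureData implements: when a
queried measure is missing from an older block, its default value is used
instead of a read from the chunk (the helper and its inputs are illustrative,
not Carbon APIs):

    import java.util.Arrays;

    public class MeasureFillDemo {
      static Object[] fillRow(boolean[] existsInBlock, Object[] blockValues,
          Object[] defaults) {
        Object[] row = new Object[existsInBlock.length];
        for (int i = 0; i < existsInBlock.length; i++) {
          // read from the block when present, otherwise fall back to the default
          row[i] = existsInBlock[i] ? blockValues[i] : defaults[i];
        }
        return row;
      }

      public static void main(String[] args) {
        // the second measure was added to the table after this block was written
        Object[] row = fillRow(new boolean[] {true, false},
            new Object[] {42.5d, null}, new Object[] {0d, 0L});
        System.out.println(Arrays.toString(row)); // [42.5, 0]
      }
    }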

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/collector/impl/DictionaryBasedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/collector/impl/DictionaryBasedResultCollector.java b/core/src/main/java/org/carbondata/scan/collector/impl/DictionaryBasedResultCollector.java
deleted file mode 100644
index 89a7d9d..0000000
--- a/core/src/main/java/org/carbondata/scan/collector/impl/DictionaryBasedResultCollector.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.collector.impl;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.core.util.DataTypeUtil;
-import org.carbondata.scan.executor.infos.BlockExecutionInfo;
-import org.carbondata.scan.filter.GenericQueryType;
-import org.carbondata.scan.model.QueryDimension;
-import org.carbondata.scan.model.QueryMeasure;
-import org.carbondata.scan.result.AbstractScannedResult;
-
-/**
- * Collector which decodes dictionary, direct dictionary, no dictionary and
- * complex dimension values while collecting the scanned result into rows.
- */
-public class DictionaryBasedResultCollector extends AbstractScannedResultCollector {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(DictionaryBasedResultCollector.class.getName());
-
-  public DictionaryBasedResultCollector(BlockExecutionInfo blockExecutionInfos) {
-    super(blockExecutionInfos);
-  }
-
-  /**
-   * This method will add a record, both key and value, to the list object;
-   * it keeps track of how many records are processed, to handle the limit scenario
-   *
-   * @return the collected rows
-   */
-  @Override public List<Object[]> collectData(AbstractScannedResult scannedResult, int batchSize) {
-    List<Object[]> listBasedResult = new ArrayList<>(batchSize);
-    boolean isMsrsPresent = measureDatatypes.length > 0;
-    QueryDimension[] queryDimensions = tableBlockExecutionInfos.getQueryDimensions();
-    QueryMeasure[] queryMeasures = tableBlockExecutionInfos.getQueryMeasures();
-    Map<Integer, GenericQueryType> comlexDimensionInfoMap =
-        tableBlockExecutionInfos.getComlexDimensionInfoMap();
-    boolean[] dictionaryEncodingArray = CarbonUtil.getDictionaryEncodingArray(queryDimensions);
-    boolean[] directDictionaryEncodingArray =
-        CarbonUtil.getDirectDictionaryEncodingArray(queryDimensions);
-    boolean[] complexDataTypeArray = CarbonUtil.getComplexDataTypeArray(queryDimensions);
-    int dimSize = queryDimensions.length;
-    boolean isDimensionsExist = dimSize > 0;
-    int[] order = new int[dimSize + queryMeasures.length];
-    for (int i = 0; i < dimSize; i++) {
-      order[i] = queryDimensions[i].getQueryOrder();
-    }
-    for (int i = 0; i < queryMeasures.length; i++) {
-      order[i + dimSize] = queryMeasures[i].getQueryOrder();
-    }
-    // scan the record and add to list
-    int rowCounter = 0;
-    int dictionaryColumnIndex = 0;
-    int noDictionaryColumnIndex = 0;
-    int complexTypeColumnIndex = 0;
-    int[] surrogateResult;
-    String[] noDictionaryKeys;
-    byte[][] complexTypeKeyArray;
-    while (scannedResult.hasNext() && rowCounter < batchSize) {
-      Object[] row = new Object[dimSize + queryMeasures.length];
-      if (isDimensionsExist) {
-        surrogateResult = scannedResult.getDictionaryKeyIntegerArray();
-        noDictionaryKeys = scannedResult.getNoDictionaryKeyStringArray();
-        complexTypeKeyArray = scannedResult.getComplexTypeKeyArray();
-        dictionaryColumnIndex = 0;
-        noDictionaryColumnIndex = 0;
-        complexTypeColumnIndex = 0;
-        for (int i = 0; i < dimSize; i++) {
-          if (!dictionaryEncodingArray[i]) {
-            row[order[i]] = DataTypeUtil
-                .getDataBasedOnDataType(noDictionaryKeys[noDictionaryColumnIndex++],
-                    queryDimensions[i].getDimension().getDataType());
-          } else if (directDictionaryEncodingArray[i]) {
-            DirectDictionaryGenerator directDictionaryGenerator =
-                DirectDictionaryKeyGeneratorFactory
-                    .getDirectDictionaryGenerator(queryDimensions[i].getDimension().getDataType());
-            if (directDictionaryGenerator != null) {
-              row[order[i]] = directDictionaryGenerator.getValueFromSurrogate(
-                  surrogateResult[dictionaryColumnIndex++]);
-            }
-          } else if (complexDataTypeArray[i]) {
-            row[order[i]] = comlexDimensionInfoMap
-                .get(queryDimensions[i].getDimension().getOrdinal())
-                .getDataBasedOnDataTypeFromSurrogates(
-                    ByteBuffer.wrap(complexTypeKeyArray[complexTypeColumnIndex++]));
-          } else {
-            row[order[i]] = surrogateResult[dictionaryColumnIndex++];
-          }
-        }
-
-      } else {
-        scannedResult.incrementCounter();
-      }
-      if (isMsrsPresent) {
-        Object[] msrValues = new Object[measureDatatypes.length];
-        fillMeasureData(msrValues, 0, scannedResult);
-        for (int i = 0; i < msrValues.length; i++) {
-          row[order[i + dimSize]] = msrValues[i];
-        }
-      }
-      listBasedResult.add(row);
-      rowCounter++;
-    }
-    return listBasedResult;
-  }
-
-}
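
A minimal sketch of the order[] remapping in collectData above: values are
produced dimensions-first, then measures, but each lands at its original
position in the projection (the query orders here are invented):

    import java.util.Arrays;

    public class QueryOrderDemo {
      public static void main(String[] args) {
        // queryOrder of dim0, dim1 and msr0 in a SELECT like: dim1, msr0, dim0
        int[] order = {2, 0, 1};
        Object[] produced = {"d0", "d1", 7}; // scan yields dims first, then msrs
        Object[] row = new Object[produced.length];
        for (int i = 0; i < produced.length; i++) {
          row[order[i]] = produced[i];
        }
        System.out.println(Arrays.toString(row)); // [d1, 7, d0]
      }
    }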

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/collector/impl/RawBasedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/collector/impl/RawBasedResultCollector.java b/core/src/main/java/org/carbondata/scan/collector/impl/RawBasedResultCollector.java
deleted file mode 100644
index 5bb0567..0000000
--- a/core/src/main/java/org/carbondata/scan/collector/impl/RawBasedResultCollector.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.collector.impl;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.scan.executor.infos.BlockExecutionInfo;
-import org.carbondata.scan.model.QueryMeasure;
-import org.carbondata.scan.result.AbstractScannedResult;
-import org.carbondata.scan.wrappers.ByteArrayWrapper;
-
-/**
- * Collector which keeps the raw key bytes wrapped in a ByteArrayWrapper
- * alongside the measure values while collecting the scanned result.
- */
-public class RawBasedResultCollector extends AbstractScannedResultCollector {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(RawBasedResultCollector.class.getName());
-
-  public RawBasedResultCollector(BlockExecutionInfo blockExecutionInfos) {
-    super(blockExecutionInfos);
-  }
-
-  /**
-   * This method will add a record, both key and value, to the list object;
-   * it keeps track of how many records are processed, to handle the limit scenario
-   *
-   * @return the collected rows
-   */
-  @Override public List<Object[]> collectData(AbstractScannedResult scannedResult, int batchSize) {
-    List<Object[]> listBasedResult = new ArrayList<>(batchSize);
-    QueryMeasure[] queryMeasures = tableBlockExecutionInfos.getQueryMeasures();
-    ByteArrayWrapper wrapper = null;
-    // scan the record and add to list
-    int rowCounter = 0;
-    while (scannedResult.hasNext() && rowCounter < batchSize) {
-      Object[] row = new Object[1 + queryMeasures.length];
-      wrapper = new ByteArrayWrapper();
-      wrapper.setDictionaryKey(scannedResult.getDictionaryKeyArray());
-      wrapper.setNoDictionaryKeys(scannedResult.getNoDictionaryKeyArray());
-      wrapper.setComplexTypesKeys(scannedResult.getComplexTypeKeyArray());
-      row[0] = wrapper;
-      fillMeasureData(row, 1, scannedResult);
-      listBasedResult.add(row);
-      rowCounter++;
-    }
-    updateData(listBasedResult);
-    return listBasedResult;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/complextypes/ArrayQueryType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/complextypes/ArrayQueryType.java b/core/src/main/java/org/carbondata/scan/complextypes/ArrayQueryType.java
deleted file mode 100644
index 34d3d89..0000000
--- a/core/src/main/java/org/carbondata/scan/complextypes/ArrayQueryType.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.complextypes;
-
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.scan.filter.GenericQueryType;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-import org.apache.spark.sql.catalyst.util.*; // Don't remove it, used for spark 1.6 compatibility
-import org.apache.spark.sql.types.*;
-
-public class ArrayQueryType extends ComplexQueryType implements GenericQueryType {
-
-  private GenericQueryType children;
-  private int keyOrdinalForQuery;
-
-  public ArrayQueryType(String name, String parentname, int blockIndex) {
-    super(name, parentname, blockIndex);
-  }
-
-  @Override public void addChildren(GenericQueryType children) {
-    if (this.getName().equals(children.getParentname())) {
-      this.children = children;
-    } else {
-      this.children.addChildren(children);
-    }
-  }
-
-  @Override public String getName() {
-    return name;
-  }
-
-  @Override public void setName(String name) {
-    this.name = name;
-  }
-
-  @Override public String getParentname() {
-    return parentname;
-  }
-
-  @Override public void setParentname(String parentname) {
-    this.parentname = parentname;
-
-  }
-
-  @Override public void getAllPrimitiveChildren(List<GenericQueryType> primitiveChild) {
-    if (children instanceof PrimitiveQueryType) {
-      primitiveChild.add(children);
-    } else {
-      children.getAllPrimitiveChildren(primitiveChild);
-    }
-  }
-
-  public void parseBlocksAndReturnComplexColumnByteArray(
-      DimensionColumnDataChunk[] dimensionColumnDataChunks, int rowNumber,
-      DataOutputStream dataOutputStream) throws IOException {
-    byte[] input = new byte[8];
-    copyBlockDataChunk(dimensionColumnDataChunks, rowNumber, input);
-    ByteBuffer byteArray = ByteBuffer.wrap(input);
-    int dataLength = byteArray.getInt();
-    dataOutputStream.writeInt(dataLength);
-    if (dataLength != 0) {
-      int columnIndex = byteArray.getInt();
-      for (int i = 0; i < dataLength; i++) {
-        children
-            .parseBlocksAndReturnComplexColumnByteArray(dimensionColumnDataChunks, columnIndex++,
-                dataOutputStream);
-      }
-    }
-  }
-
-  @Override public int getSurrogateIndex() {
-    return 0;
-  }
-
-  @Override public void setSurrogateIndex(int surrIndex) {
-
-  }
-
-  @Override public int getBlockIndex() {
-    return blockIndex;
-  }
-
-  @Override public void setBlockIndex(int blockIndex) {
-    this.blockIndex = blockIndex;
-  }
-
-  @Override public int getColsCount() {
-    return children.getColsCount() + 1;
-  }
-
-  @Override public void parseAndGetResultBytes(ByteBuffer complexData, DataOutputStream dataOutput)
-      throws IOException {
-    int dataLength = complexData.getInt();
-    dataOutput.writeInt(dataLength);
-    for (int i = 0; i < dataLength; i++) {
-      children.parseAndGetResultBytes(complexData, dataOutput);
-    }
-  }
-
-  @Override public void setKeySize(int[] keyBlockSize) {
-    children.setKeySize(keyBlockSize);
-  }
-
-  @Override public DataType getSchemaType() {
-    return new ArrayType(null, true);
-  }
-
-  @Override public int getKeyOrdinalForQuery() {
-    return keyOrdinalForQuery;
-  }
-
-  @Override public void setKeyOrdinalForQuery(int keyOrdinalForQuery) {
-    this.keyOrdinalForQuery = keyOrdinalForQuery;
-  }
-
-  @Override public void fillRequiredBlockData(BlocksChunkHolder blockChunkHolder) {
-    readBlockDataChunk(blockChunkHolder);
-    children.fillRequiredBlockData(blockChunkHolder);
-  }
-
-  @Override public Object getDataBasedOnDataTypeFromSurrogates(ByteBuffer surrogateData) {
-    int dataLength = surrogateData.getInt();
-    if (dataLength == -1) {
-      return null;
-    }
-    Object[] data = new Object[dataLength];
-    for (int i = 0; i < dataLength; i++) {
-      data[i] = children.getDataBasedOnDataTypeFromSurrogates(surrogateData);
-    }
-    return new GenericArrayData(data);
-  }
-
-}
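
A sketch of the length-prefixed layout that parseAndGetResultBytes and
getDataBasedOnDataTypeFromSurrogates walk: an int element count followed by
that many child values (fixed 4-byte ints here; real children parse their own
encoding):

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    public class ArrayParseDemo {
      public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(12).putInt(2).putInt(10).putInt(20);
        buf.flip();
        int length = buf.getInt();  // element count; -1 marks a null array
        int[] data = new int[length];
        for (int i = 0; i < length; i++) {
          data[i] = buf.getInt();   // each child consumes its own bytes
        }
        System.out.println(Arrays.toString(data)); // [10, 20]
      }
    }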

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/complextypes/ComplexQueryType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/complextypes/ComplexQueryType.java b/core/src/main/java/org/carbondata/scan/complextypes/ComplexQueryType.java
deleted file mode 100644
index c6631f3..0000000
--- a/core/src/main/java/org/carbondata/scan/complextypes/ComplexQueryType.java
+++ /dev/null
@@ -1,80 +0,0 @@
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.complextypes;
-
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.scan.filter.GenericQueryType;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-public class ComplexQueryType {
-  protected GenericQueryType children;
-
-  protected String name;
-
-  protected String parentname;
-
-  protected int blockIndex;
-
-  public ComplexQueryType(String name, String parentname, int blockIndex) {
-    this.name = name;
-    this.parentname = parentname;
-    this.blockIndex = blockIndex;
-  }
-
-  public void fillRequiredBlockData(BlocksChunkHolder blockChunkHolder) {
-    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
-      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
-          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
-    }
-    children.fillRequiredBlockData(blockChunkHolder);
-  }
-
-  /**
-   * Method will copy the block chunk holder data to the passed
-   * byte[]; this method is also used by the child types
-   *
-   * @param rowNumber row whose value has to be copied
-   * @param input     destination buffer for the column value
-   */
-  protected void copyBlockDataChunk(DimensionColumnDataChunk[] dimensionColumnDataChunks,
-      int rowNumber, byte[] input) {
-    byte[] data = (byte[]) dimensionColumnDataChunks[blockIndex].getCompleteDataChunk();
-    if (null != dimensionColumnDataChunks[blockIndex].getAttributes().getInvertedIndexes()) {
-      System.arraycopy(data, dimensionColumnDataChunks[blockIndex].getAttributes()
-              .getInvertedIndexesReverse()[rowNumber] * dimensionColumnDataChunks[blockIndex]
-              .getAttributes().getColumnValueSize(), input, 0,
-          dimensionColumnDataChunks[blockIndex].getAttributes().getColumnValueSize());
-    } else {
-      System.arraycopy(data,
-          rowNumber * dimensionColumnDataChunks[blockIndex].getAttributes().getColumnValueSize(),
-          input, 0, dimensionColumnDataChunks[blockIndex].getAttributes().getColumnValueSize());
-    }
-  }
-
-  /*
-   * This method will read the block data chunk from the respective block
-   */
-  protected void readBlockDataChunk(BlocksChunkHolder blockChunkHolder) {
-    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
-      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
-          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
-    }
-  }
-}
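
A pure-array sketch of what copyBlockDataChunk does: when the chunk keeps an
inverted index, the logical row is translated through the reverse index before
a fixed-size column value is copied out (sizes and data are invented):

    import java.util.Arrays;

    public class ChunkCopyDemo {
      static byte[] valueAt(byte[] data, int[] invertedReverse, int rowNumber,
          int valueSize) {
        int physicalRow =
            invertedReverse != null ? invertedReverse[rowNumber] : rowNumber;
        byte[] out = new byte[valueSize];
        System.arraycopy(data, physicalRow * valueSize, out, 0, valueSize);
        return out;
      }

      public static void main(String[] args) {
        byte[] data = {1, 2, 3, 4, 5, 6};  // three 2-byte column values
        int[] reverse = {2, 0, 1};         // logical row -> physical row
        System.out.println(Arrays.toString(valueAt(data, reverse, 0, 2))); // [5, 6]
      }
    }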

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/complextypes/PrimitiveQueryType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/complextypes/PrimitiveQueryType.java b/core/src/main/java/org/carbondata/scan/complextypes/PrimitiveQueryType.java
deleted file mode 100644
index 33a22c7..0000000
--- a/core/src/main/java/org/carbondata/scan/complextypes/PrimitiveQueryType.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.complextypes;
-
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import org.carbondata.core.cache.dictionary.Dictionary;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
-import org.carbondata.core.keygenerator.mdkey.Bits;
-import org.carbondata.core.util.DataTypeUtil;
-import org.carbondata.scan.filter.GenericQueryType;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-import org.apache.spark.sql.types.*;
-
-public class PrimitiveQueryType extends ComplexQueryType implements GenericQueryType {
-
-  private int index;
-
-  private String name;
-  private String parentname;
-
-  private int keySize;
-
-  private int blockIndex;
-
-  private Dictionary dictionary;
-
-  private org.carbondata.core.carbon.metadata.datatype.DataType dataType;
-
-  private boolean isDirectDictionary;
-
-  public PrimitiveQueryType(String name, String parentname, int blockIndex,
-      org.carbondata.core.carbon.metadata.datatype.DataType dataType, int keySize,
-      Dictionary dictionary, boolean isDirectDictionary) {
-    super(name, parentname, blockIndex);
-    this.dataType = dataType;
-    this.keySize = keySize;
-    this.dictionary = dictionary;
-    this.name = name;
-    this.parentname = parentname;
-    this.blockIndex = blockIndex;
-    this.isDirectDictionary = isDirectDictionary;
-  }
-
-  @Override public void addChildren(GenericQueryType children) {
-
-  }
-
-  @Override public String getName() {
-    return name;
-  }
-
-  @Override public void setName(String name) {
-    this.name = name;
-  }
-
-  @Override public String getParentname() {
-    return parentname;
-  }
-
-  @Override public void setParentname(String parentname) {
-    this.parentname = parentname;
-
-  }
-
-  @Override public void getAllPrimitiveChildren(List<GenericQueryType> primitiveChild) {
-
-  }
-
-  @Override public int getSurrogateIndex() {
-    return index;
-  }
-
-  @Override public void setSurrogateIndex(int surrIndex) {
-    index = surrIndex;
-  }
-
-  @Override public int getBlockIndex() {
-    return blockIndex;
-  }
-
-  @Override public void setBlockIndex(int blockIndex) {
-    this.blockIndex = blockIndex;
-  }
-
-  @Override public int getColsCount() {
-    return 1;
-  }
-
-  @Override public void parseBlocksAndReturnComplexColumnByteArray(
-      DimensionColumnDataChunk[] dimensionDataChunks, int rowNumber,
-      DataOutputStream dataOutputStream) throws IOException {
-    byte[] currentVal =
-        new byte[dimensionDataChunks[blockIndex].getAttributes().getColumnValueSize()];
-    copyBlockDataChunk(dimensionDataChunks, rowNumber, currentVal);
-    dataOutputStream.write(currentVal);
-  }
-
-  @Override public void setKeySize(int[] keyBlockSize) {
-    this.keySize = keyBlockSize[this.blockIndex];
-  }
-
-  @Override public void parseAndGetResultBytes(ByteBuffer complexData, DataOutputStream dataOutput)
-      throws IOException {
-  }
-
-  @Override public DataType getSchemaType() {
-    switch (dataType) {
-      case INT:
-        return IntegerType$.MODULE$;
-      case DOUBLE:
-        return DoubleType$.MODULE$;
-      case LONG:
-        return LongType$.MODULE$;
-      case BOOLEAN:
-        return BooleanType$.MODULE$;
-      case TIMESTAMP:
-        return TimestampType$.MODULE$;
-      default:
-        return IntegerType$.MODULE$;
-    }
-  }
-
-  @Override public int getKeyOrdinalForQuery() {
-    return 0;
-  }
-
-  @Override public void setKeyOrdinalForQuery(int keyOrdinalForQuery) {
-  }
-
-  @Override public void fillRequiredBlockData(BlocksChunkHolder blockChunkHolder) {
-    readBlockDataChunk(blockChunkHolder);
-  }
-
-  @Override public Object getDataBasedOnDataTypeFromSurrogates(ByteBuffer surrogateData) {
-
-    byte[] data = new byte[keySize];
-    surrogateData.get(data);
-    Bits bit = new Bits(new int[]{keySize * 8});
-    int surrogateValue = (int) bit.getKeyArray(data, 0)[0];
-    Object actualData = null;
-    if (isDirectDictionary) {
-      DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
-          .getDirectDictionaryGenerator(dataType);
-      actualData = directDictionaryGenerator.getValueFromSurrogate(surrogateValue);
-    } else {
-      String dictionaryValueForKey = dictionary.getDictionaryValueForKey(surrogateValue);
-      actualData = DataTypeUtil.getDataBasedOnDataType(dictionaryValueForKey, this.dataType);
-    }
-    return actualData;
-  }
-}
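
A sketch of the byte-to-surrogate step getDataBasedOnDataTypeFromSurrogates
performs through Bits, written here with plain shifts (big-endian byte order
is a stated assumption):

    public class SurrogateUnpackDemo {
      static int surrogate(byte[] data) {
        int value = 0;
        for (byte b : data) {
          value = (value << 8) | (b & 0xFF); // accumulate big-endian bytes
        }
        return value;
      }

      public static void main(String[] args) {
        // a 4-byte key holding surrogate 300
        System.out.println(surrogate(new byte[] {0, 0, 1, 44})); // 300
      }
    }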

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/complextypes/StructQueryType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/complextypes/StructQueryType.java b/core/src/main/java/org/carbondata/scan/complextypes/StructQueryType.java
deleted file mode 100644
index 0cb534c..0000000
--- a/core/src/main/java/org/carbondata/scan/complextypes/StructQueryType.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.complextypes;
-
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.scan.filter.GenericQueryType;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-import org.apache.spark.sql.catalyst.expressions.GenericInternalRowWithSchema;
-import org.apache.spark.sql.types.DataType;
-import org.apache.spark.sql.types.Metadata;
-import org.apache.spark.sql.types.StructField;
-import org.apache.spark.sql.types.StructType;
-
-public class StructQueryType extends ComplexQueryType implements GenericQueryType {
-
-  private List<GenericQueryType> children = new ArrayList<GenericQueryType>();
-  private String name;
-  private String parentname;
-  private int blockIndex;
-  private int keyOrdinalForQuery;
-
-  public StructQueryType(String name, String parentname, int blockIndex) {
-    super(name, parentname, blockIndex);
-    this.name = name;
-    this.parentname = parentname;
-    this.blockIndex = blockIndex;
-  }
-
-  @Override public void addChildren(GenericQueryType newChild) {
-    if (this.getName().equals(newChild.getParentname())) {
-      this.children.add(newChild);
-    } else {
-      for (GenericQueryType child : this.children) {
-        child.addChildren(newChild);
-      }
-    }
-
-  }
-
-  @Override public String getName() {
-    return name;
-  }
-
-  @Override public void setName(String name) {
-    this.name = name;
-  }
-
-  @Override public String getParentname() {
-    return parentname;
-  }
-
-  @Override public void setParentname(String parentname) {
-    this.parentname = parentname;
-
-  }
-
-  @Override public void getAllPrimitiveChildren(List<GenericQueryType> primitiveChild) {
-    for (int i = 0; i < children.size(); i++) {
-      GenericQueryType child = children.get(i);
-      if (child instanceof PrimitiveQueryType) {
-        primitiveChild.add(child);
-      } else {
-        child.getAllPrimitiveChildren(primitiveChild);
-      }
-    }
-  }
-
-  @Override public int getSurrogateIndex() {
-    return 0;
-  }
-
-  @Override public void setSurrogateIndex(int surrIndex) {
-
-  }
-
-  @Override public int getBlockIndex() {
-    return blockIndex;
-  }
-
-  @Override public void setBlockIndex(int blockIndex) {
-    this.blockIndex = blockIndex;
-  }
-
-  @Override public int getColsCount() {
-    int colsCount = 1;
-    for (int i = 0; i < children.size(); i++) {
-      colsCount += children.get(i).getColsCount();
-    }
-    return colsCount;
-  }
-
-  @Override public void parseBlocksAndReturnComplexColumnByteArray(
-      DimensionColumnDataChunk[] dimensionColumnDataChunks, int rowNumber,
-      DataOutputStream dataOutputStream) throws IOException {
-    byte[] input = new byte[8];
-    copyBlockDataChunk(dimensionColumnDataChunks, rowNumber, input);
-    ByteBuffer byteArray = ByteBuffer.wrap(input);
-    int childElement = byteArray.getInt();
-    dataOutputStream.writeInt(childElement);
-    if (childElement != 0) {
-      for (int i = 0; i < childElement; i++) {
-        children.get(i)
-            .parseBlocksAndReturnComplexColumnByteArray(dimensionColumnDataChunks, rowNumber,
-                dataOutputStream);
-      }
-    }
-  }
-
-  @Override public void parseAndGetResultBytes(ByteBuffer complexData, DataOutputStream dataOutput)
-      throws IOException {
-    int childElement = complexData.getInt();
-    dataOutput.writeInt(childElement);
-    for (int i = 0; i < childElement; i++) {
-      children.get(i).parseAndGetResultBytes(complexData, dataOutput);
-    }
-  }
-
-  @Override public void setKeySize(int[] keyBlockSize) {
-    for (int i = 0; i < children.size(); i++) {
-      children.get(i).setKeySize(keyBlockSize);
-    }
-  }
-
-  @Override public DataType getSchemaType() {
-    StructField[] fields = new StructField[children.size()];
-    for (int i = 0; i < children.size(); i++) {
-      fields[i] = new StructField(children.get(i).getName(), null, true,
-          Metadata.empty());
-    }
-    return new StructType(fields);
-  }
-
-  @Override public int getKeyOrdinalForQuery() {
-    return keyOrdinalForQuery;
-  }
-
-  @Override public void setKeyOrdinalForQuery(int keyOrdinalForQuery) {
-    this.keyOrdinalForQuery = keyOrdinalForQuery;
-  }
-
-  @Override public void fillRequiredBlockData(BlocksChunkHolder blockChunkHolder) {
-    readBlockDataChunk(blockChunkHolder);
-
-    for (int i = 0; i < children.size(); i++) {
-      children.get(i).fillRequiredBlockData(blockChunkHolder);
-    }
-  }
-
-  @Override public Object getDataBasedOnDataTypeFromSurrogates(ByteBuffer surrogateData) {
-    int childLength = surrogateData.getInt();
-    Object[] fields = new Object[childLength];
-    for (int i = 0; i < childLength; i++) {
-      fields[i] =  children.get(i).getDataBasedOnDataTypeFromSurrogates(surrogateData);
-    }
-
-    return new GenericInternalRowWithSchema(fields, (StructType) getSchemaType());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/executor/QueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/QueryExecutor.java b/core/src/main/java/org/carbondata/scan/executor/QueryExecutor.java
deleted file mode 100644
index eea9988..0000000
--- a/core/src/main/java/org/carbondata/scan/executor/QueryExecutor.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.executor;
-
-import org.carbondata.common.CarbonIterator;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.model.QueryModel;
-
-/**
- * Interface for carbon query executor.
- * Will be used to execute the query based on the query model
- * and will return the iterator over query result
- */
-public interface QueryExecutor<E> {
-
-  /**
-   * Below method will be used to execute the query based on query model passed from driver
-   *
-   * @param queryModel query details
-   * @return query result iterator
-   * @throws QueryExecutionException if any failure while executing the query
-   */
-  CarbonIterator<E> execute(QueryModel queryModel) throws QueryExecutionException;
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/executor/QueryExecutorFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/QueryExecutorFactory.java b/core/src/main/java/org/carbondata/scan/executor/QueryExecutorFactory.java
deleted file mode 100644
index 7f7203c..0000000
--- a/core/src/main/java/org/carbondata/scan/executor/QueryExecutorFactory.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.executor;
-
-import org.carbondata.scan.executor.impl.DetailQueryExecutor;
-import org.carbondata.scan.model.QueryModel;
-
-/**
- * Factory class to get the query executor, used from the RDD side.
- * This will return the executor based on the query type.
- */
-public class QueryExecutorFactory {
-
-  public static QueryExecutor getQueryExecutor(QueryModel queryModel) {
-    return new DetailQueryExecutor();
-  }
-}
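
How these pieces are meant to be wired together, as a sketch against the
interfaces above (the query model construction is elided; error handling is
minimal):

    // assumes a populated QueryModel from the driver
    QueryExecutor executor = QueryExecutorFactory.getQueryExecutor(queryModel);
    try {
      CarbonIterator<Object> iterator = executor.execute(queryModel);
      while (iterator.hasNext()) {
        Object row = iterator.next();  // one result row per next()
      }
    } catch (QueryExecutionException e) {
      // surface the failure to the caller
      throw new RuntimeException(e);
    }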

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/executor/exception/QueryExecutionException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/exception/QueryExecutionException.java b/core/src/main/java/org/carbondata/scan/executor/exception/QueryExecutionException.java
deleted file mode 100644
index f5d0e81..0000000
--- a/core/src/main/java/org/carbondata/scan/executor/exception/QueryExecutionException.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.executor.exception;
-
-import java.util.Locale;
-
-/**
- * Exception class for query execution
- */
-public class QueryExecutionException extends Exception {
-
-  /**
-   * default serial version ID.
-   */
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * The Error message.
-   */
-  private String msg = "";
-
-  /**
-   * Constructor
-   *
-   * @param msg The error message for this exception.
-   */
-  public QueryExecutionException(String msg) {
-    super(msg);
-    this.msg = msg;
-  }
-
-  /**
-   * Constructor
-   *
-   * @param msg The error message for this exception.
-   * @param t   The cause of this exception.
-   */
-  public QueryExecutionException(String msg, Throwable t) {
-    super(msg, t);
-    this.msg = msg;
-  }
-
-  /**
-   * Constructor
-   *
-   * @param t The cause of this exception.
-   */
-  public QueryExecutionException(Throwable t) {
-    super(t);
-  }
-
-  /**
-   * This method is used to get the localized message.
-   *
-   * @param locale - A Locale object representing a specific geographical,
-   *               political, or cultural region.
-   * @return - Localized error message; localization is not yet implemented,
-   *         so an empty string is returned.
-   */
-  public String getLocalizedMessage(Locale locale) {
-    return "";
-  }
-
-  /**
-   * Delegates to the default, non-localized message.
-   */
-  @Override public String getLocalizedMessage() {
-    return super.getLocalizedMessage();
-  }
-
-  /**
-   * Returns the stored error message.
-   */
-  public String getMessage() {
-    return this.msg;
-  }
-
-}

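Because the exception chains its cause via super(msg, t), callers can wrap low-level failures without losing the root stack trace. A hypothetical wrap-and-rethrow fragment (loadBlocks() is a placeholder for the failing lower-level call; the same pattern appears in AbstractQueryExecutor.initQuery below):

    try {
      loadBlocks();                              // placeholder helper
    } catch (IndexBuilderException e) {
      // chain the cause so the original stack trace survives
      throw new QueryExecutionException("failed to load table blocks", e);
    }
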
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/executor/impl/AbstractQueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/impl/AbstractQueryExecutor.java b/core/src/main/java/org/carbondata/scan/executor/impl/AbstractQueryExecutor.java
deleted file mode 100644
index 519d4e6..0000000
--- a/core/src/main/java/org/carbondata/scan/executor/impl/AbstractQueryExecutor.java
+++ /dev/null
@@ -1,412 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.executor.impl;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.common.logging.impl.StandardLogService;
-import org.carbondata.core.carbon.datastore.BlockIndexStore;
-import org.carbondata.core.carbon.datastore.IndexKey;
-import org.carbondata.core.carbon.datastore.block.AbstractIndex;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.datastore.exception.IndexBuilderException;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
-import org.carbondata.core.carbon.querystatistics.QueryStatistic;
-import org.carbondata.core.carbon.querystatistics.QueryStatisticsRecorder;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-import org.carbondata.core.keygenerator.KeyGenException;
-import org.carbondata.core.keygenerator.KeyGenerator;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.scan.executor.QueryExecutor;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.executor.infos.AggregatorInfo;
-import org.carbondata.scan.executor.infos.BlockExecutionInfo;
-import org.carbondata.scan.executor.infos.KeyStructureInfo;
-import org.carbondata.scan.executor.util.QueryUtil;
-import org.carbondata.scan.executor.util.RestructureUtil;
-import org.carbondata.scan.filter.FilterUtil;
-import org.carbondata.scan.model.QueryDimension;
-import org.carbondata.scan.model.QueryMeasure;
-import org.carbondata.scan.model.QueryModel;
-
-import org.apache.commons.lang3.ArrayUtils;
-
-/**
- * This class provides a skeletal implementation of the {@link QueryExecutor}
- * interface to minimize the effort required to implement this interface. This
- * will be used to prepare all the properties required for query execution
- */
-public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(AbstractQueryExecutor.class.getName());
-  /**
-   * holder for query properties which will be used to execute the query
-   */
-  protected QueryExecutorProperties queryProperties;
-
-  public AbstractQueryExecutor() {
-    queryProperties = new QueryExecutorProperties();
-  }
-
-  /**
-   * Fills the executor properties from the query model: the model is
-   * parsed and its details are copied into the query properties holder.
-   *
-   * @param queryModel query model to initialize from
-   */
-  protected void initQuery(QueryModel queryModel) throws QueryExecutionException {
-    StandardLogService.setThreadName(StandardLogService.getPartitionID(
-        queryModel.getAbsoluteTableIdentifier().getCarbonTableIdentifier().getTableName()),
-        queryModel.getQueryId());
-    LOGGER.info("Query will be executed on table: " + queryModel.getAbsoluteTableIdentifier()
-        .getCarbonTableIdentifier().getTableName());
-    // initializing the statistics list to record the query statistics;
-    // a copy-on-write list is used to handle concurrent access
-    queryProperties.queryStatisticsRecorder = new QueryStatisticsRecorder(queryModel.getQueryId());
-    queryModel.setStatisticsRecorder(queryProperties.queryStatisticsRecorder);
-    QueryUtil.resolveQueryModel(queryModel);
-    QueryStatistic queryStatistic = new QueryStatistic();
-    // get the table blocks
-    try {
-      queryProperties.dataBlocks = BlockIndexStore.getInstance()
-          .loadAndGetBlocks(queryModel.getTableBlockInfos(),
-              queryModel.getAbsoluteTableIdentifier());
-    } catch (IndexBuilderException e) {
-      throw new QueryExecutionException(e);
-    }
-    queryStatistic
-        .addStatistics("Time taken to load the Block(s) In Executor", System.currentTimeMillis());
-    queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);
-    // updating the restructuring infos for the query
-    queryProperties.keyStructureInfo = getKeyStructureInfo(queryModel,
-        queryProperties.dataBlocks.get(queryProperties.dataBlocks.size() - 1).getSegmentProperties()
-            .getDimensionKeyGenerator());
-
-    // calculating the total number of aggregated columns
-    int aggTypeCount = queryModel.getQueryMeasures().size();
-
-    int currentIndex = 0;
-    String[] aggTypes = new String[aggTypeCount];
-    DataType[] dataTypes = new DataType[aggTypeCount];
-
-    for (QueryMeasure carbonMeasure : queryModel.getQueryMeasures()) {
-      // record the data type and aggregation type of every measure;
-      // these are later used to select the aggregator
-      aggTypes[currentIndex] = carbonMeasure.getAggregateFunction();
-      dataTypes[currentIndex] = carbonMeasure.getMeasure().getDataType();
-      currentIndex++;
-    }
-    queryProperties.measureDataTypes = dataTypes;
-    // aggregation is executed in the following order:
-    // 1. aggregate dimension expressions
-    // 2. expressions
-    // 3. query measures
-    // so compute the start index of the expression columns
-    // and of the measure columns
-    queryProperties.aggExpressionStartIndex = queryModel.getQueryMeasures().size();
-    queryProperties.measureStartIndex = aggTypes.length - queryModel.getQueryMeasures().size();
-
-    queryProperties.complexFilterDimension =
-        QueryUtil.getAllFilterDimensions(queryModel.getFilterExpressionResolverTree());
-    queryStatistic = new QueryStatistic();
-    // mapping from dictionary column unique id to its dictionary,
-    // which is used to get the actual column data
-    queryProperties.columnToDictionayMapping = QueryUtil
-        .getDimensionDictionaryDetail(queryModel.getQueryDimension(),
-            queryProperties.complexFilterDimension, queryModel.getAbsoluteTableIdentifier());
-    queryStatistic
-        .addStatistics("Time taken to load the Dictionary In Executor", System.currentTimeMillis());
-    queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);
-    queryModel.setColumnToDictionaryMapping(queryProperties.columnToDictionayMapping);
-    // initialize the sort dimension indexes; they are updated while the sort
-    // info is resolved, so the default of 0 means no dimension is sorted
-    queryProperties.sortDimIndexes = new byte[queryModel.getQueryDimension().size()];
-  }
-
-  /**
-   * Returns the key structure info for the query.
-   *
-   * @param queryModel   query model
-   * @param keyGenerator key generator of the block
-   * @return key structure info
-   */
-  private KeyStructureInfo getKeyStructureInfo(QueryModel queryModel, KeyGenerator keyGenerator) {
-    // getting the masked byte range for dictionary column
-    int[] maskByteRanges =
-        QueryUtil.getMaskedByteRange(queryModel.getQueryDimension(), keyGenerator);
-
-    // getting the masked bytes for query dimension dictionary column
-    int[] maskedBytes = QueryUtil.getMaskedByte(keyGenerator.getKeySizeInBytes(), maskByteRanges);
-
-    // max key for the dictionary dimension present in the query
-    byte[] maxKey = null;
-    try {
-      // get the max key, which is used to mask the full key and
-      // produce the masked key
-      maxKey = QueryUtil.getMaxKeyBasedOnDimensions(queryModel.getQueryDimension(), keyGenerator);
-    } catch (KeyGenException e) {
-      LOGGER.error(e, "problem while getting the max key");
-    }
-
-    KeyStructureInfo restructureInfos = new KeyStructureInfo();
-    restructureInfos.setKeyGenerator(keyGenerator);
-    restructureInfos.setMaskByteRanges(maskByteRanges);
-    restructureInfos.setMaskedBytes(maskedBytes);
-    restructureInfos.setMaxKey(maxKey);
-    return restructureInfos;
-  }
-
-  protected List<BlockExecutionInfo> getBlockExecutionInfos(QueryModel queryModel)
-      throws QueryExecutionException {
-    initQuery(queryModel);
-    List<BlockExecutionInfo> blockExecutionInfoList = new ArrayList<BlockExecutionInfo>();
-    // fill the block execution info for every block selected by the
-    // query; execution is then driven by these infos
-    for (int i = 0; i < queryProperties.dataBlocks.size(); i++) {
-      blockExecutionInfoList
-          .add(getBlockExecutionInfoForBlock(queryModel, queryProperties.dataBlocks.get(i)));
-    }
-    queryProperties.complexDimensionInfoMap =
-        blockExecutionInfoList.get(blockExecutionInfoList.size() - 1).getComlexDimensionInfoMap();
-    return blockExecutionInfoList;
-  }
-
-  /**
-   * Returns the block execution info required to execute a block,
-   * based on the query model.
-   *
-   * @param queryModel query model from user query
-   * @param blockIndex block index
-   * @return block execution info
-   * @throws QueryExecutionException any failure during block info creation
-   */
-  protected BlockExecutionInfo getBlockExecutionInfoForBlock(QueryModel queryModel,
-      AbstractIndex blockIndex) throws QueryExecutionException {
-    BlockExecutionInfo blockExecutionInfo = new BlockExecutionInfo();
-    SegmentProperties segmentProperties = blockIndex.getSegmentProperties();
-    List<CarbonDimension> tableBlockDimensions = segmentProperties.getDimensions();
-    KeyGenerator blockKeyGenerator = segmentProperties.getDimensionKeyGenerator();
-
-    // keep only those query dimensions that are present in this
-    // table block
-    List<QueryDimension> updatedQueryDimension = RestructureUtil
-        .getUpdatedQueryDimension(queryModel.getQueryDimension(), tableBlockDimensions,
-            segmentProperties.getComplexDimensions());
-    // TODO add complex dimension children
-    int[] maskByteRangesForBlock =
-        QueryUtil.getMaskedByteRange(updatedQueryDimension, blockKeyGenerator);
-    int[] maskedByte =
-        QueryUtil.getMaskedByte(blockKeyGenerator.getKeySizeInBytes(), maskByteRangesForBlock);
-    blockExecutionInfo.setQueryDimensions(
-        updatedQueryDimension.toArray(new QueryDimension[updatedQueryDimension.size()]));
-    blockExecutionInfo.setQueryMeasures(queryModel.getQueryMeasures()
-        .toArray(new QueryMeasure[queryModel.getQueryMeasures().size()]));
-    blockExecutionInfo.setDataBlock(blockIndex);
-    blockExecutionInfo.setBlockKeyGenerator(blockKeyGenerator);
-    // adding aggregation info for query
-    blockExecutionInfo.setAggregatorInfo(getAggregatorInfoForBlock(queryModel, blockIndex));
-    // adding query statistics list to record the statistics
-    blockExecutionInfo.setStatisticsRecorder(queryProperties.queryStatisticsRecorder);
-    // setting the limit
-    blockExecutionInfo.setLimit(queryModel.getLimit());
-    // setting whether detail query or not
-    blockExecutionInfo.setDetailQuery(queryModel.isDetailQuery());
-    // setting whether raw record query or not
-    blockExecutionInfo.setRawRecordDetailQuery(queryModel.isForcedDetailRawQuery());
-    // setting the masked bytes of the block, which are
-    // used to unpack the older block keys
-    blockExecutionInfo.setMaskedByteForBlock(maskedByte);
-    // total number of dimension and measure blocks
-    blockExecutionInfo
-        .setTotalNumberDimensionBlock(segmentProperties.getDimensionOrdinalToBlockMapping().size());
-    blockExecutionInfo
-        .setTotalNumberOfMeasureBlock(segmentProperties.getMeasuresOrdinalToBlockMapping().size());
-    blockExecutionInfo.setComplexDimensionInfoMap(QueryUtil
-        .getComplexDimensionsMap(updatedQueryDimension,
-            segmentProperties.getDimensionOrdinalToBlockMapping(),
-            segmentProperties.getEachComplexDimColumnValueSize(),
-            queryProperties.columnToDictionayMapping, queryProperties.complexFilterDimension));
-    // to check whether older block key update is required or not
-    blockExecutionInfo.setFixedKeyUpdateRequired(
-        !blockKeyGenerator.equals(queryProperties.keyStructureInfo.getKeyGenerator()));
-    IndexKey startIndexKey = null;
-    IndexKey endIndexKey = null;
-    if (null != queryModel.getFilterExpressionResolverTree()) {
-      // loading the filter executer tree for filter evaluation
-      blockExecutionInfo.setFilterExecuterTree(FilterUtil
-          .getFilterExecuterTree(queryModel.getFilterExpressionResolverTree(), segmentProperties,
-              blockExecutionInfo.getComlexDimensionInfoMap()));
-      List<IndexKey> listOfStartEndKeys = new ArrayList<IndexKey>(2);
-      FilterUtil.traverseResolverTreeAndGetStartAndEndKey(segmentProperties,
-          queryModel.getAbsoluteTableIdentifier(), queryModel.getFilterExpressionResolverTree(),
-          listOfStartEndKeys);
-      startIndexKey = listOfStartEndKeys.get(0);
-      endIndexKey = listOfStartEndKeys.get(1);
-    } else {
-      try {
-        startIndexKey = FilterUtil.prepareDefaultStartIndexKey(segmentProperties);
-        endIndexKey = FilterUtil.prepareDefaultEndIndexKey(segmentProperties);
-      } catch (KeyGenException e) {
-        throw new QueryExecutionException(e);
-      }
-    }
-    blockExecutionInfo.setFileType(
-        FileFactory.getFileType(queryModel.getAbsoluteTableIdentifier().getStorePath()));
-    //setting the start index key of the block node
-    blockExecutionInfo.setStartKey(startIndexKey);
-    //setting the end index key of the block node
-    blockExecutionInfo.setEndKey(endIndexKey);
-    // expression dimensions
-    List<CarbonDimension> expressionDimensions =
-        new ArrayList<CarbonDimension>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    // expression measure
-    List<CarbonMeasure> expressionMeasures =
-        new ArrayList<CarbonMeasure>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    // setting all the dimension chunk indexes to be read from file
-    blockExecutionInfo.setAllSelectedDimensionBlocksIndexes(QueryUtil
-        .getDimensionsBlockIndexes(updatedQueryDimension,
-            segmentProperties.getDimensionOrdinalToBlockMapping(), expressionDimensions));
-    // setting all the measure chunk indexes to be read from file
-    blockExecutionInfo.setAllSelectedMeasureBlocksIndexes(QueryUtil
-        .getMeasureBlockIndexes(queryModel.getQueryMeasures(), expressionMeasures,
-            segmentProperties.getMeasuresOrdinalToBlockMapping()));
-    // setting the key structure info which will be required
-    // to update the older block key with new key generator
-    blockExecutionInfo.setKeyStructureInfo(queryProperties.keyStructureInfo);
-    // setting the size of fixed key column (dictionary column)
-    blockExecutionInfo.setFixedLengthKeySize(getKeySize(updatedQueryDimension, segmentProperties));
-    Set<Integer> dictionaryColumnBlockIndex = new HashSet<Integer>();
-    List<Integer> noDictionaryColumnBlockIndex = new ArrayList<Integer>();
-    // get the block index to be read from file for query dimension
-    // for both dictionary columns and no dictionary columns
-    QueryUtil.fillQueryDimensionsBlockIndexes(updatedQueryDimension,
-        segmentProperties.getDimensionOrdinalToBlockMapping(), dictionaryColumnBlockIndex,
-        noDictionaryColumnBlockIndex);
-    int[] queryDictionaryColumnBlockIndexes = ArrayUtils.toPrimitive(
-        dictionaryColumnBlockIndex.toArray(new Integer[dictionaryColumnBlockIndex.size()]));
-    // the dictionary column block indexes must be sorted because the
-    // dimension column keys are filled in key order
-    Arrays.sort(queryDictionaryColumnBlockIndexes);
-    blockExecutionInfo.setDictionaryColumnBlockIndex(queryDictionaryColumnBlockIndexes);
-    // setting the no dictionary column block indexes
-    blockExecutionInfo.setNoDictionaryBlockIndexes(ArrayUtils.toPrimitive(
-        noDictionaryColumnBlockIndex.toArray(new Integer[noDictionaryColumnBlockIndex.size()])));
-    // setting column id to dictionary mapping
-    blockExecutionInfo.setColumnIdToDcitionaryMapping(queryProperties.columnToDictionayMapping);
-    // setting each column value size
-    blockExecutionInfo.setEachColumnValueSize(segmentProperties.getEachDimColumnValueSize());
-    blockExecutionInfo.setComplexColumnParentBlockIndexes(
-        getComplexDimensionParentBlockIndexes(updatedQueryDimension));
-    try {
-      // set the column group and its key structure info, which is used
-      // to get the column group column data for the final row and for
-      // dimension aggregation
-      blockExecutionInfo.setColumnGroupToKeyStructureInfo(
-          QueryUtil.getColumnGroupKeyStructureInfo(updatedQueryDimension, segmentProperties));
-    } catch (KeyGenException e) {
-      throw new QueryExecutionException(e);
-    }
-    return blockExecutionInfo;
-  }
-
-  /**
-   * Returns the fixed key length size, which is used to create a row
-   * from a column chunk.
-   *
-   * @param queryDimension    query dimension
-   * @param blockMetadataInfo block metadata info
-   * @return key size
-   */
-  private int getKeySize(List<QueryDimension> queryDimension, SegmentProperties blockMetadataInfo) {
-    List<Integer> fixedLengthDimensionOrdinal =
-        new ArrayList<Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    int counter = 0;
-    while (counter < queryDimension.size()) {
-      if (queryDimension.get(counter).getDimension().numberOfChild() > 0) {
-        counter += queryDimension.get(counter).getDimension().numberOfChild();
-        continue;
-      } else if (!CarbonUtil.hasEncoding(queryDimension.get(counter).getDimension().getEncoder(),
-          Encoding.DICTIONARY)) {
-        counter++;
-      } else {
-        fixedLengthDimensionOrdinal.add(queryDimension.get(counter).getDimension().getKeyOrdinal());
-        counter++;
-      }
-    }
-    int[] dictionaryColumnOrdinal = ArrayUtils.toPrimitive(
-        fixedLengthDimensionOrdinal.toArray(new Integer[fixedLengthDimensionOrdinal.size()]));
-    if (dictionaryColumnOrdinal.length > 0) {
-      return blockMetadataInfo.getFixedLengthKeySplitter()
-          .getKeySizeByBlock(dictionaryColumnOrdinal);
-    }
-    return 0;
-  }
-
-  /**
-   * Returns the aggregator info for the query.
-   *
-   * @param queryModel query model
-   * @param tableBlock table block
-   * @return aggregator info
-   */
-  private AggregatorInfo getAggregatorInfoForBlock(QueryModel queryModel,
-      AbstractIndex tableBlock) {
-    // getting the aggregate infos which will be used during aggregation
-    AggregatorInfo aggregatorInfos = RestructureUtil
-        .getAggregatorInfos(queryModel.getQueryMeasures(),
-            tableBlock.getSegmentProperties().getMeasures());
-    // setting the index of expression in measure aggregators
-    aggregatorInfos.setExpressionAggregatorStartIndex(queryProperties.aggExpressionStartIndex);
-    // setting the index of measure columns in measure aggregators
-    aggregatorInfos.setMeasureAggregatorStartIndex(queryProperties.measureStartIndex);
-    // setting the measure data types for all aggregation functions
-    // selected in the query
-    aggregatorInfos.setMeasureDataTypes(queryProperties.measureDataTypes);
-    return aggregatorInfos;
-  }
-
-  private int[] getComplexDimensionParentBlockIndexes(List<QueryDimension> queryDimensions) {
-    List<Integer> parentBlockIndexList = new ArrayList<Integer>();
-    for (QueryDimension queryDimension : queryDimensions) {
-      if (CarbonUtil.hasDataType(queryDimension.getDimension().getDataType(),
-          new DataType[] { DataType.ARRAY, DataType.STRUCT, DataType.MAP })) {
-        parentBlockIndexList.add(queryDimension.getDimension().getOrdinal());
-      }
-    }
-    return ArrayUtils
-        .toPrimitive(parentBlockIndexList.toArray(new Integer[parentBlockIndexList.size()]));
-  }
-
-}

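The masked-byte machinery in getKeyStructureInfo exists because all dictionary dimensions of a block are packed into a single MDKey byte array; a query touching only some dimensions keeps just the byte positions those dimensions occupy. A minimal sketch of the idea, as an illustration rather than the exact QueryUtil implementation:

    // Keep only the byte positions (maskByteRanges) covered by the queried
    // dictionary dimensions, AND-ing with the max key to zero out the bits
    // belonging to dimensions that are not part of the query.
    static byte[] getMaskedKey(byte[] fullKey, byte[] maxKey, int[] maskByteRanges) {
      byte[] masked = new byte[maskByteRanges.length];
      for (int i = 0; i < maskByteRanges.length; i++) {
        int pos = maskByteRanges[i];
        masked[i] = (byte) (fullKey[pos] & maxKey[pos]);
      }
      return masked;
    }
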
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/executor/impl/DetailQueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/impl/DetailQueryExecutor.java b/core/src/main/java/org/carbondata/scan/executor/impl/DetailQueryExecutor.java
deleted file mode 100644
index 30eb2d2..0000000
--- a/core/src/main/java/org/carbondata/scan/executor/impl/DetailQueryExecutor.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.executor.impl;
-
-import java.util.List;
-
-import org.carbondata.common.CarbonIterator;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.executor.infos.BlockExecutionInfo;
-import org.carbondata.scan.model.QueryModel;
-import org.carbondata.scan.result.iterator.DetailQueryResultIterator;
-
-/**
- * Executes the detail query: all the block execution infos are passed
- * to a detail query result iterator, which is returned to the caller.
- */
-public class DetailQueryExecutor extends AbstractQueryExecutor<Object[]> {
-
-  @Override public CarbonIterator<Object[]> execute(QueryModel queryModel)
-      throws QueryExecutionException {
-    List<BlockExecutionInfo> blockExecutionInfoList = getBlockExecutionInfos(queryModel);
-    return new DetailQueryResultIterator(blockExecutionInfoList, queryModel);
-  }
-
-}

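A hypothetical end-to-end fragment, assuming a fully populated QueryModel and with exception handling omitted; the point of returning an iterator is that rows are produced block by block instead of being materialized in memory all at once:

    QueryExecutor executor = QueryExecutorFactory.getQueryExecutor(queryModel);
    CarbonIterator<Object[]> rows = executor.execute(queryModel);
    while (rows.hasNext()) {
      Object[] row = rows.next();  // projected dimension and measure values
      // consume the row
    }
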
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/executor/impl/QueryExecutorProperties.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/impl/QueryExecutorProperties.java b/core/src/main/java/org/carbondata/scan/executor/impl/QueryExecutorProperties.java
deleted file mode 100644
index 10fac8d..0000000
--- a/core/src/main/java/org/carbondata/scan/executor/impl/QueryExecutorProperties.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.executor.impl;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.carbondata.core.cache.dictionary.Dictionary;
-import org.carbondata.core.carbon.datastore.block.AbstractIndex;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-import org.carbondata.core.carbon.querystatistics.QueryStatisticsRecorder;
-import org.carbondata.scan.executor.infos.KeyStructureInfo;
-import org.carbondata.scan.filter.GenericQueryType;
-
-/**
- * Holds all the properties required for query execution
- */
-public class QueryExecutorProperties {
-
-  /**
-   * holds the information required for updating the older blocks'
-   * dictionary keys
-   */
-  public KeyStructureInfo keyStructureInfo;
-  /**
-   * since there are multiple types of column aggregation (dimension,
-   * expression, measure), this holds the start index of the measure
-   * aggregations
-   */
-  public int measureStartIndex;
-  /**
-   * queries like count(1), count(*), etc. will use this parameter
-   */
-  public boolean isFunctionQuery;
-  /**
-   * start index of the aggregate expression columns
-   */
-  public int aggExpressionStartIndex;
-  /**
-   * indexes of the dimensions present in the order by clause
-   * of a query
-   */
-  public byte[] sortDimIndexes;
-
-  /**
-   * mapping from dictionary column id to its dictionary, used to
-   * resolve the actual column data
-   */
-  public Map<String, Dictionary> columnToDictionayMapping;
-
-  /**
-   * Measure datatypes
-   */
-  public DataType[] measureDataTypes;
-  /**
-   * mapping from complex dimension parent block index to its generic query type
-   */
-  public Map<Integer, GenericQueryType> complexDimensionInfoMap;
-  /**
-   * all the complex dimensions present in the filter
-   */
-  public Set<CarbonDimension> complexFilterDimension;
-  /**
-   * to record the query execution details phase wise
-   */
-  public QueryStatisticsRecorder queryStatisticsRecorder;
-  /**
-   * list of blocks in which query will be executed
-   */
-  protected List<AbstractIndex> dataBlocks;
-
-}



[15/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataFileStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataFileStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataFileStore.java
deleted file mode 100644
index dd92dfa..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataFileStore.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.data.compressed;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.MeasureDataWrapper;
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.datastorage.store.impl.CompressedDataMeasureDataWrapper;
-
-public class HeavyCompressedDoubleArrayDataFileStore
-    extends AbstractHeavyCompressedDoubleArrayDataStore {
-  /**
-   * offset of each measure column within the data file.
-   */
-  private long[] measuresOffsetsArray;
-
-  /**
-   * length in bytes of each measure column within the data file.
-   */
-  private int[] measuresLengthArray;
-
-  /**
-   * path of the data file to read from.
-   */
-  private String fileName;
-
-  /**
-   * HeavyCompressedDoubleArrayDataFileStore.
-   *
-   * @param compressionModel
-   * @param measuresOffsetsArray
-   * @param measuresLengthArray
-   * @param fileName
-   */
-  public HeavyCompressedDoubleArrayDataFileStore(ValueCompressionModel compressionModel,
-      long[] measuresOffsetsArray, int[] measuresLengthArray, String fileName) {
-    super(compressionModel);
-    if (null != compressionModel) {
-      this.fileName = fileName;
-      this.measuresLengthArray = measuresLengthArray;
-      this.measuresOffsetsArray = measuresOffsetsArray;
-      for (int i = 0; i < values.length; i++) {
-        values[i] = compressionModel.getUnCompressValues()[i].getNew().getCompressorObject();
-      }
-    }
-  }
-
-  @Override public MeasureDataWrapper getBackData(int[] cols, FileHolder fileHolder) {
-    if (null == compressionModel) {
-      return null;
-    }
-    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[values.length];
-
-    if (cols != null) {
-      for (int i = 0; i < cols.length; i++) {
-        ValueCompressonHolder.UnCompressValue copy = values[cols[i]].getNew();
-        copy.setValue(fileHolder
-            .readByteArray(fileName, measuresOffsetsArray[cols[i]], measuresLengthArray[cols[i]]));
-        vals[cols[i]] = copy.uncompress(compressionModel.getChangedDataType()[cols[i]])
-            .getValues(compressionModel.getDecimal()[cols[i]],
-                compressionModel.getMaxValue()[cols[i]]);
-        copy = null;
-      }
-    } else {
-      for (int j = 0; j < vals.length; j++) {
-        ValueCompressonHolder.UnCompressValue copy = values[j].getNew();
-        copy.setValue(
-            fileHolder.readByteArray(fileName, measuresOffsetsArray[j], measuresLengthArray[j]));
-        vals[j] = copy.uncompress(compressionModel.getChangedDataType()[j])
-            .getValues(compressionModel.getDecimal()[j], compressionModel.getMaxValue()[j]);
-        copy = null;
-      }
-    }
-    return new CompressedDataMeasureDataWrapper(vals);
-
-  }
-
-  @Override public MeasureDataWrapper getBackData(int cols, FileHolder fileHolder) {
-    if (null == compressionModel) {
-      return null;
-    }
-    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[values.length];
-    ValueCompressonHolder.UnCompressValue copy = values[cols].getNew();
-    copy.setValue(
-        fileHolder.readByteArray(fileName, measuresOffsetsArray[cols], measuresLengthArray[cols]));
-    vals[cols] = copy.uncompress(compressionModel.getChangedDataType()[cols])
-        .getValues(compressionModel.getDecimal()[cols], compressionModel.getMaxValue()[cols]);
-    return new CompressedDataMeasureDataWrapper(vals);
-  }
-
-}

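Each measure column in the file store is addressed by an (offset, length) pair, so getBackData only needs positioned reads. A minimal stand-in for the FileHolder.readByteArray call used above; unlike this sketch, the real FileHolder is designed to be reused across many reads:

    import java.io.IOException;
    import java.io.RandomAccessFile;

    final class SimpleFileHolder {
      // Read the [offset, offset + length) slice of the file, i.e. the
      // compressed byte payload of one measure column.
      byte[] readByteArray(String fileName, long offset, int length) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(fileName, "r")) {
          byte[] buffer = new byte[length];
          raf.seek(offset);
          raf.readFully(buffer);
          return buffer;
        }
      }
    }
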
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataInMemoryStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataInMemoryStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataInMemoryStore.java
deleted file mode 100644
index cf1585e..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataInMemoryStore.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.data.compressed;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.MeasureDataWrapper;
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.datastorage.store.impl.CompressedDataMeasureDataWrapper;
-
-public class HeavyCompressedDoubleArrayDataInMemoryStore
-    extends AbstractHeavyCompressedDoubleArrayDataStore {
-
-  public HeavyCompressedDoubleArrayDataInMemoryStore(ValueCompressionModel compressionModel,
-      long[] measuresOffsetsArray, int[] measuresLengthArray, String fileName,
-      FileHolder fileHolder) {
-    super(compressionModel);
-    for (int i = 0; i < measuresLengthArray.length; i++) {
-      values[i] = compressionModel.getUnCompressValues()[i].getCompressorObject();
-      values[i].setValue(
-          fileHolder.readByteArray(fileName, measuresOffsetsArray[i], measuresLengthArray[i]));
-    }
-  }
-
-  public HeavyCompressedDoubleArrayDataInMemoryStore(ValueCompressionModel compressionModel) {
-    super(compressionModel);
-  }
-
-  @Override public MeasureDataWrapper getBackData(int[] cols, FileHolder fileHolder) {
-    if (null == compressionModel) {
-      return null;
-    }
-    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[values.length];
-    if (cols != null) {
-      for (int i = 0; i < cols.length; i++) {
-        vals[cols[i]] = values[cols[i]].uncompress(compressionModel.getChangedDataType()[cols[i]])
-            .getValues(compressionModel.getDecimal()[cols[i]],
-                compressionModel.getMaxValue()[cols[i]]);
-      }
-    } else {
-      for (int i = 0; i < vals.length; i++) {
-
-        vals[i] = values[i].uncompress(compressionModel.getChangedDataType()[i])
-            .getValues(compressionModel.getDecimal()[i], compressionModel.getMaxValue()[i]);
-      }
-    }
-    return new CompressedDataMeasureDataWrapper(vals);
-  }
-
-  @Override public MeasureDataWrapper getBackData(int cols, FileHolder fileHolder) {
-    if (null == compressionModel) {
-      return null;
-    }
-    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[values.length];
-    vals[cols] = values[cols].uncompress(compressionModel.getChangedDataType()[cols])
-        .getValues(compressionModel.getDecimal()[cols], compressionModel.getMaxValue()[cols]);
-    return new CompressedDataMeasureDataWrapper(vals);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/uncompressed/AbstractDoubleArrayDataStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/uncompressed/AbstractDoubleArrayDataStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/uncompressed/AbstractDoubleArrayDataStore.java
deleted file mode 100644
index 811fad0..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/uncompressed/AbstractDoubleArrayDataStore.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.data.uncompressed;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.NodeMeasureDataStore;
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonWriteDataHolder;
-import org.carbondata.core.util.ValueCompressionUtil;
-
-public abstract class AbstractDoubleArrayDataStore implements NodeMeasureDataStore {
-
-  protected ValueCompressonHolder.UnCompressValue[] values;
-
-  protected ValueCompressionModel compressionModel;
-
-  private char[] type;
-
-  public AbstractDoubleArrayDataStore(ValueCompressionModel compressionModel) {
-    this.compressionModel = compressionModel;
-    if (null != compressionModel) {
-      values =
-          new ValueCompressonHolder.UnCompressValue[compressionModel.getUnCompressValues().length];
-      type = compressionModel.getType();
-    }
-  }
-
-  @Override public byte[][] getWritableMeasureDataArray(CarbonWriteDataHolder[] dataHolder) {
-    values =
-        new ValueCompressonHolder.UnCompressValue[compressionModel.getUnCompressValues().length];
-    for (int i = 0; i < compressionModel.getUnCompressValues().length; i++) {
-      values[i] = compressionModel.getUnCompressValues()[i].getNew();
-      if (type[i] != CarbonCommonConstants.BYTE_VALUE_MEASURE
-          && type[i] != CarbonCommonConstants.BIG_DECIMAL_MEASURE) {
-        if (type[i] == CarbonCommonConstants.BIG_INT_MEASURE) {
-          values[i].setValue(ValueCompressionUtil
-              .getCompressedValues(compressionModel.getCompType()[i],
-                  dataHolder[i].getWritableLongValues(), compressionModel.getChangedDataType()[i],
-                  (long) compressionModel.getMaxValue()[i], compressionModel.getDecimal()[i]));
-        } else {
-          values[i].setValue(ValueCompressionUtil
-              .getCompressedValues(compressionModel.getCompType()[i],
-                  dataHolder[i].getWritableDoubleValues(), compressionModel.getChangedDataType()[i],
-                  (double) compressionModel.getMaxValue()[i], compressionModel.getDecimal()[i]));
-        }
-      } else {
-        values[i].setValue(dataHolder[i].getWritableByteArrayValues());
-      }
-    }
-
-    byte[][] returnValue = new byte[values.length][];
-
-    for (int i = 0; i < values.length; i++) {
-      returnValue[i] = values[i].getBackArrayData();
-    }
-    return returnValue;
-  }
-
-  @Override public short getLength() {
-    return values != null ? (short) values.length : 0;
-  }
-
-}
-

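getWritableMeasureDataArray above dispatches on a per-measure type tag: byte-array and big-decimal measures are stored as already-serialized bytes, big-int measures go through the long-value compressor, and everything else through the double-value compressor. A condensed sketch of that dispatch; the tag characters and compressor stubs below are placeholders, since the real tags live in CarbonCommonConstants and the real compression in ValueCompressionUtil:

    // Placeholder tags: 'c' = byte array, 'b' = big decimal, 'd' = big int.
    static byte[] compressMeasure(char type, Object rawValues) {
      switch (type) {
        case 'c':  // byte-array measure: already serialized
        case 'b':  // big-decimal measure: already serialized
          return (byte[]) rawValues;
        case 'd':  // big-int measure: compress the long values
          return compressLongs((long[]) rawValues);
        default:   // double measure: compress the double values
          return compressDoubles((double[]) rawValues);
      }
    }

    // Stubs standing in for ValueCompressionUtil.getCompressedValues(...).
    static byte[] compressLongs(long[] values) { return new byte[0]; }
    static byte[] compressDoubles(double[] values) { return new byte[0]; }
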
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataFileStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataFileStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataFileStore.java
deleted file mode 100644
index bc60765..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataFileStore.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.data.uncompressed;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.MeasureDataWrapper;
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.datastorage.store.impl.CompressedDataMeasureDataWrapper;
-
-public class DoubleArrayDataFileStore extends AbstractDoubleArrayDataStore {
-
-  private long[] measuresOffsetsArray;
-
-  private int[] measuresLengthArray;
-
-  private String fileName;
-
-  public DoubleArrayDataFileStore(ValueCompressionModel compressionModel,
-      long[] measuresOffsetsArray, String fileName, int[] measuresLengthArray) {
-    super(compressionModel);
-    this.fileName = fileName;
-    this.measuresLengthArray = measuresLengthArray;
-    this.measuresOffsetsArray = measuresOffsetsArray;
-  }
-
-  @Override public MeasureDataWrapper getBackData(int[] cols, FileHolder fileHolder) {
-    if (null == compressionModel) {
-      return null;
-    }
-    UnCompressValue[] unComp = new UnCompressValue[measuresLengthArray.length];
-    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[measuresLengthArray.length];
-    if (cols != null) {
-      for (int i = 0; i < cols.length; i++) {
-        unComp[cols[i]] = compressionModel.getUnCompressValues()[cols[i]].getNew();
-        unComp[cols[i]].setValueInBytes(fileHolder
-            .readByteArray(fileName, measuresOffsetsArray[cols[i]], measuresLengthArray[cols[i]]));
-        vals[cols[i]] = unComp[cols[i]].getValues(compressionModel.getDecimal()[cols[i]],
-            compressionModel.getMaxValue()[cols[i]]);
-      }
-    } else {
-      for (int i = 0; i < unComp.length; i++) {
-
-        unComp[i] = compressionModel.getUnCompressValues()[i].getNew();
-        unComp[i].setValueInBytes(
-            fileHolder.readByteArray(fileName, measuresOffsetsArray[i], measuresLengthArray[i]));
-        vals[i] = unComp[i]
-            .getValues(compressionModel.getDecimal()[i], compressionModel.getMaxValue()[i]);
-      }
-    }
-    return new CompressedDataMeasureDataWrapper(vals);
-  }
-
-  @Override public MeasureDataWrapper getBackData(int cols, FileHolder fileHolder) {
-    if (null == compressionModel) {
-      return null;
-    }
-    UnCompressValue[] unComp = new UnCompressValue[measuresLengthArray.length];
-    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[measuresLengthArray.length];
-
-    unComp[cols] = compressionModel.getUnCompressValues()[cols].getNew();
-    unComp[cols].setValueInBytes(
-        fileHolder.readByteArray(fileName, measuresOffsetsArray[cols], measuresLengthArray[cols]));
-    vals[cols] = unComp[cols]
-        .getValues(compressionModel.getDecimal()[cols], compressionModel.getMaxValue()[cols]);
-    return new CompressedDataMeasureDataWrapper(vals);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataInMemoryStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataInMemoryStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataInMemoryStore.java
deleted file mode 100644
index 3adafa2..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataInMemoryStore.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.data.uncompressed;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.MeasureDataWrapper;
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-import org.carbondata.core.datastorage.store.impl.CompressedDataMeasureDataWrapper;
-
-/**
- * In-memory variant of the double array data store: all measure bytes
- * are read up front in the constructor instead of on demand.
- */
-public class DoubleArrayDataInMemoryStore extends AbstractDoubleArrayDataStore {
-
-  /**
-   * DoubleArrayDataInMemoryStore.
-   *
-   * @param compressionModel
-   * @param measuresOffsetsArray
-   * @param measuresLengthArray
-   * @param fileName
-   * @param fileHolder
-   */
-  public DoubleArrayDataInMemoryStore(ValueCompressionModel compressionModel,
-      long[] measuresOffsetsArray, int[] measuresLengthArray, String fileName,
-      FileHolder fileHolder) {
-    super(compressionModel);
-    if (null != compressionModel) {
-      UnCompressValue[] unCompValues = compressionModel.getUnCompressValues();
-      if (null != unCompValues) {
-        for (int i = 0; i < measuresLengthArray.length; i++) {
-
-          values[i] = unCompValues[i].getNew();
-          values[i].setValueInBytes(
-              fileHolder.readByteArray(fileName, measuresOffsetsArray[i], measuresLengthArray[i]));
-        }
-      }
-    }
-  }
-
-  /**
-   * DoubleArrayDataInMemoryStore.
-   *
-   * @param compressionModel
-   */
-  public DoubleArrayDataInMemoryStore(ValueCompressionModel compressionModel) {
-    super(compressionModel);
-  }
-
-  @Override public MeasureDataWrapper getBackData(int[] cols, FileHolder fileHolder) {
-    if (null == compressionModel) {
-      return null;
-    }
-    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[values.length];
-    if (null == cols) {
-      for (int i = 0; i < vals.length; i++) {
-        vals[i] = values[i]
-            .getValues(compressionModel.getDecimal()[i], compressionModel.getMaxValue()[i]);
-      }
-    } else {
-      for (int i = 0; i < cols.length; i++) {
-        vals[cols[i]] = values[cols[i]].getValues(compressionModel.getDecimal()[cols[i]],
-            compressionModel.getMaxValue()[cols[i]]);
-      }
-    }
-    return new CompressedDataMeasureDataWrapper(vals);
-  }
-
-  @Override public MeasureDataWrapper getBackData(int cols, FileHolder fileHolder) {
-    if (null == compressionModel) {
-      return null;
-    }
-    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[values.length];
-
-    vals[cols] = values[cols]
-        .getValues(compressionModel.getDecimal()[cols], compressionModel.getMaxValue()[cols]);
-    return new CompressedDataMeasureDataWrapper(vals);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/AbstractColumnarKeyStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/AbstractColumnarKeyStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/AbstractColumnarKeyStore.java
deleted file mode 100644
index 9aa87d1..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/AbstractColumnarKeyStore.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.key.columnar;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStore;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreInfo;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-
-public abstract class AbstractColumnarKeyStore implements ColumnarKeyStore {
-
-  /**
-   * compressor will be used to compress the data
-   */
-  protected static final Compressor<byte[]> COMPRESSOR =
-      SnappyCompression.SnappyByteCompression.INSTANCE;
-
-  protected ColumnarKeyStoreInfo columnarStoreInfo;
-
-  protected byte[][] columnarKeyBlockDataIndex;
-
-  protected byte[][] columnarKeyBlockData;
-
-  protected Map<Integer, Integer> mapOfColumnIndexAndColumnBlockIndex;
-
-  protected Map<Integer, Integer> mapOfAggDataIndex;
-
-  protected byte[][] columnarUniqueblockKeyBlockIndex;
-
-  public AbstractColumnarKeyStore(ColumnarKeyStoreInfo columnarStoreInfo, boolean isInMemory,
-      FileHolder fileHolder) {
-    this.columnarStoreInfo = columnarStoreInfo;
-    this.mapOfColumnIndexAndColumnBlockIndex =
-        new HashMap<Integer, Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    this.mapOfAggDataIndex =
-        new HashMap<Integer, Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    int index = 0;
-    for (int i = 0; i < this.columnarStoreInfo.getIsSorted().length; i++) {
-      if (!this.columnarStoreInfo.getIsSorted()[i]) {
-        this.mapOfColumnIndexAndColumnBlockIndex.put(i, index++);
-      }
-    }
-    index = 0;
-    for (int i = 0; i < this.columnarStoreInfo.getAggKeyBlock().length; i++) {
-      if (this.columnarStoreInfo.getAggKeyBlock()[i]) {
-        mapOfAggDataIndex.put(i, index++);
-      }
-    }
-    if (isInMemory) {
-      this.columnarKeyBlockData = new byte[this.columnarStoreInfo.getIsSorted().length][];
-      this.columnarKeyBlockDataIndex = new byte[this.mapOfColumnIndexAndColumnBlockIndex.size()][];
-      this.columnarUniqueblockKeyBlockIndex = new byte[this.mapOfAggDataIndex.size()][];
-      for (int i = 0; i < columnarStoreInfo.getSizeOfEachBlock().length; i++) {
-        columnarKeyBlockData[i] = fileHolder.readByteArray(columnarStoreInfo.getFilePath(),
-            columnarStoreInfo.getKeyBlockOffsets()[i], columnarStoreInfo.getKeyBlockLengths()[i]);
-
-        if (!this.columnarStoreInfo.getIsSorted()[i]) {
-          this.columnarKeyBlockDataIndex[mapOfColumnIndexAndColumnBlockIndex.get(i)] = fileHolder
-              .readByteArray(columnarStoreInfo.getFilePath(),
-                  columnarStoreInfo.getKeyBlockIndexOffsets()[mapOfColumnIndexAndColumnBlockIndex
-                      .get(i)],
-                  columnarStoreInfo.getKeyBlockIndexLength()[mapOfColumnIndexAndColumnBlockIndex
-                      .get(i)]);
-        }
-
-        if (this.columnarStoreInfo.getAggKeyBlock()[i]) {
-          this.columnarUniqueblockKeyBlockIndex[mapOfAggDataIndex.get(i)] = fileHolder
-              .readByteArray(columnarStoreInfo.getFilePath(),
-                  columnarStoreInfo.getDataIndexMapOffsets()[mapOfAggDataIndex.get(i)],
-                  columnarStoreInfo.getDataIndexMapLength()[mapOfAggDataIndex.get(i)]);
-        }
-      }
-    }
-  }
-
-  protected int[] getColumnIndexForNonFilter(int[] columnIndex) {
-    int[] columnIndexTemp = new int[columnIndex.length];
-
-    for (int i = 0; i < columnIndex.length; i++) {
-      columnIndexTemp[columnIndex[i]] = i;
-    }
-    return columnIndexTemp;
-  }
-}

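The getColumnIndexForNonFilter method above builds the reverse lookup for an inverted column: given the permutation that maps each sorted position to its original row id, it produces the inverse mapping. A minimal, self-contained sketch of the same inversion (class name and sample data are illustrative only):

// Minimal sketch: invert a column-index permutation the way
// getColumnIndexForNonFilter does, so lookups can go both ways.
public final class ReverseIndexDemo {
  static int[] invert(int[] columnIndex) {
    int[] reverse = new int[columnIndex.length];
    for (int i = 0; i < columnIndex.length; i++) {
      // columnIndex[i] is the original row id of the i-th sorted entry,
      // so that original row id maps back to position i
      reverse[columnIndex[i]] = i;
    }
    return reverse;
  }

  public static void main(String[] args) {
    int[] columnIndex = {2, 0, 3, 1};
    System.out.println(java.util.Arrays.toString(invert(columnIndex))); // [1, 3, 0, 2]
  }
}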
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarFileKeyStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarFileKeyStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarFileKeyStore.java
deleted file mode 100644
index 97547e8..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarFileKeyStore.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.key.columnar.compressed;
-
-import java.util.List;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreDataHolder;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreInfo;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreMetadata;
-import org.carbondata.core.datastorage.store.columnar.UnBlockIndexer;
-import org.carbondata.core.datastorage.store.impl.key.columnar.AbstractColumnarKeyStore;
-import org.carbondata.core.util.CarbonUtil;
-
-public class CompressedColumnarFileKeyStore extends AbstractColumnarKeyStore {
-
-  public CompressedColumnarFileKeyStore(ColumnarKeyStoreInfo columnarStoreInfo) {
-    super(columnarStoreInfo, false, null);
-  }
-
-  @Override public ColumnarKeyStoreDataHolder[] getUnCompressedKeyArray(FileHolder fileHolder,
-      int[] blockIndex, boolean[] needCompressedData, int[] noDictionaryColIndexes) {
-    ColumnarKeyStoreDataHolder[] columnarKeyStoreDataHolders =
-        new ColumnarKeyStoreDataHolder[blockIndex.length];
-
-    for (int i = 0; i < columnarKeyStoreDataHolders.length; i++) {
-      byte[] columnarKeyBlockData = null;
-      int[] columnKeyBlockIndex = null;
-      int[] columnKeyBlockReverseIndexes = null;
-      ColumnarKeyStoreMetadata columnarKeyStoreMetadata = null;
-      int[] dataIndex = null;
-      boolean isUnCompressed = true;
-      columnarKeyBlockData = COMPRESSOR.unCompress(fileHolder
-          .readByteArray(columnarStoreInfo.getFilePath(),
-              columnarStoreInfo.getKeyBlockOffsets()[blockIndex[i]],
-              columnarStoreInfo.getKeyBlockLengths()[blockIndex[i]]));
-      boolean isNoDictionaryBlock =
-          CompressedColumnarKeyStoreUtil.isNoDictionaryBlock(noDictionaryColIndexes, blockIndex[i]);
-      if (!isNoDictionaryBlock && this.columnarStoreInfo.getAggKeyBlock()[blockIndex[i]]) {
-        dataIndex = columnarStoreInfo.getNumberCompressor().unCompress(fileHolder
-            .readByteArray(columnarStoreInfo.getFilePath(),
-                columnarStoreInfo.getDataIndexMapOffsets()[mapOfAggDataIndex.get(blockIndex[i])],
-                columnarStoreInfo.getDataIndexMapLength()[mapOfAggDataIndex.get(blockIndex[i])]));
-        if (!needCompressedData[i]) {
-          columnarKeyBlockData = UnBlockIndexer.uncompressData(columnarKeyBlockData, dataIndex,
-              columnarStoreInfo.getSizeOfEachBlock()[blockIndex[i]]);
-          dataIndex = null;
-        } else {
-          isUnCompressed = false;
-        }
-      }
-      if (!columnarStoreInfo.getIsSorted()[blockIndex[i]]) {
-        columnKeyBlockIndex = CarbonUtil
-            .getUnCompressColumnIndex(columnarStoreInfo.getKeyBlockIndexLength()[blockIndex[i]],
-                fileHolder.readByteArray(columnarStoreInfo.getFilePath(),
-                    columnarStoreInfo.getKeyBlockIndexOffsets()[blockIndex[i]],
-                    columnarStoreInfo.getKeyBlockIndexLength()[blockIndex[i]]),
-                columnarStoreInfo.getNumberCompressor());
-        columnKeyBlockReverseIndexes = getColumnIndexForNonFilter(columnKeyBlockIndex);
-      }
-      //Since it's a high-cardinality dimension, the direct surrogates are added as part of
-      //columnarKeyStoreMetadata so that later they can be used with a ByteArrayWrapper instance.
-      if (isNoDictionaryBlock) {
-        columnarKeyStoreMetadata = new ColumnarKeyStoreMetadata(0);
-        columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
-        columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndexes);
-        columnarKeyStoreMetadata.setNoDictionaryValColumn(true);
-        columnarKeyStoreMetadata.setUnCompressed(true);
-        columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex[i]]);
-        //The system reads the direct surrogates data from a byte array which contains both
-        //the length and the direct surrogates data
-        List<byte[]> noDictionaryValBasedKeyBlockData = CompressedColumnarKeyStoreUtil
-            .readColumnarKeyBlockDataForNoDictionaryCols(columnarKeyBlockData);
-        columnarKeyStoreDataHolders[i] =
-            new ColumnarKeyStoreDataHolder(noDictionaryValBasedKeyBlockData,
-                columnarKeyStoreMetadata);
-      } else {
-        columnarKeyStoreMetadata =
-            new ColumnarKeyStoreMetadata(columnarStoreInfo.getSizeOfEachBlock()[blockIndex[i]]);
-        columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
-        columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex[i]]);
-        columnarKeyStoreMetadata.setDataIndex(dataIndex);
-        columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndexes);
-        columnarKeyStoreMetadata.setUnCompressed(isUnCompressed);
-        columnarKeyStoreDataHolders[i] =
-            new ColumnarKeyStoreDataHolder(columnarKeyBlockData, columnarKeyStoreMetadata);
-      }
-    }
-    return columnarKeyStoreDataHolders;
-  }
-
-  @Override
-  public ColumnarKeyStoreDataHolder getUnCompressedKeyArray(FileHolder fileHolder, int blockIndex,
-      boolean needCompressedData, int[] noDictionaryColIndexes) {
-    byte[] columnarKeyBlockData = null;
-    int[] columnKeyBlockIndex = null;
-    int[] columnKeyBlockReverseIndex = null;
-    ColumnarKeyStoreMetadata columnarKeyStoreMetadata = null;
-    int[] dataIndex = null;
-    boolean isUnCompressed = true;
-    columnarKeyBlockData = COMPRESSOR.unCompress(fileHolder
-        .readByteArray(columnarStoreInfo.getFilePath(),
-            columnarStoreInfo.getKeyBlockOffsets()[blockIndex],
-            columnarStoreInfo.getKeyBlockLengths()[blockIndex]));
-    boolean isNoDictionaryBlock =
-        CompressedColumnarKeyStoreUtil.isNoDictionaryBlock(noDictionaryColIndexes, blockIndex);
-    if (!isNoDictionaryBlock && this.columnarStoreInfo.getAggKeyBlock()[blockIndex]) {
-      dataIndex = columnarStoreInfo.getNumberCompressor().unCompress(fileHolder
-          .readByteArray(columnarStoreInfo.getFilePath(),
-              columnarStoreInfo.getDataIndexMapOffsets()[mapOfAggDataIndex.get(blockIndex)],
-              columnarStoreInfo.getDataIndexMapLength()[mapOfAggDataIndex.get(blockIndex)]));
-      if (!needCompressedData) {
-        columnarKeyBlockData = UnBlockIndexer.uncompressData(columnarKeyBlockData, dataIndex,
-            columnarStoreInfo.getSizeOfEachBlock()[blockIndex]);
-        dataIndex = null;
-      } else {
-        isUnCompressed = false;
-      }
-    }
-    if (!columnarStoreInfo.getIsSorted()[blockIndex]) {
-      columnKeyBlockIndex = CarbonUtil
-          .getUnCompressColumnIndex(columnarStoreInfo.getKeyBlockIndexLength()[blockIndex],
-              fileHolder.readByteArray(columnarStoreInfo.getFilePath(),
-                  columnarStoreInfo.getKeyBlockIndexOffsets()[blockIndex],
-                  columnarStoreInfo.getKeyBlockIndexLength()[blockIndex]),
-              columnarStoreInfo.getNumberCompressor());
-      columnKeyBlockReverseIndex = getColumnIndexForNonFilter(columnKeyBlockIndex);
-    }
-    //Since it's a high-cardinality dimension used in filter queries.
-    if (isNoDictionaryBlock) {
-      // the util builds both the metadata and the data holder for
-      // no-dictionary columns, so nothing else needs to be allocated here
-      return CompressedColumnarKeyStoreUtil
-          .createColumnarKeyStoreMetadataForHCDims(blockIndex, columnarKeyBlockData,
-              columnKeyBlockIndex, columnKeyBlockReverseIndex, columnarStoreInfo);
-    }
-    columnarKeyStoreMetadata =
-        new ColumnarKeyStoreMetadata(columnarStoreInfo.getSizeOfEachBlock()[blockIndex]);
-    columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
-    columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex]);
-    columnarKeyStoreMetadata.setDataIndex(dataIndex);
-    columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndex);
-    columnarKeyStoreMetadata.setUnCompressed(isUnCompressed);
-
-    return new ColumnarKeyStoreDataHolder(columnarKeyBlockData, columnarKeyStoreMetadata);
-  }
-
-}

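Every block this store touches goes through COMPRESSOR.unCompress before use; the Compressor<byte[]> here is Snappy-backed. Below is a rough stand-in for that compress/unCompress contract built on the JDK's Deflater/Inflater, an assumption chosen only so the sketch runs with no external dependency; it is not the project's SnappyCompression implementation.

import java.io.ByteArrayOutputStream;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.Inflater;

// Stand-in for the Compressor<byte[]> contract (hypothetical class name).
final class ByteCompressor {
  byte[] compress(byte[] input) {
    Deflater deflater = new Deflater();
    deflater.setInput(input);
    deflater.finish();
    ByteArrayOutputStream out = new ByteArrayOutputStream(Math.max(32, input.length));
    byte[] buffer = new byte[4096];
    while (!deflater.finished()) {
      out.write(buffer, 0, deflater.deflate(buffer));
    }
    deflater.end();
    return out.toByteArray();
  }

  byte[] unCompress(byte[] input) {
    Inflater inflater = new Inflater();
    inflater.setInput(input);
    ByteArrayOutputStream out = new ByteArrayOutputStream(Math.max(32, input.length * 2));
    byte[] buffer = new byte[4096];
    try {
      while (!inflater.finished()) {
        out.write(buffer, 0, inflater.inflate(buffer));
      }
    } catch (DataFormatException e) {
      throw new IllegalArgumentException("corrupt compressed block", e);
    } finally {
      inflater.end();
    }
    return out.toByteArray();
  }
}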
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarInMemoryStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarInMemoryStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarInMemoryStore.java
deleted file mode 100644
index 0bb73b2..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarInMemoryStore.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.key.columnar.compressed;
-
-import java.util.List;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreDataHolder;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreInfo;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreMetadata;
-import org.carbondata.core.datastorage.store.columnar.UnBlockIndexer;
-import org.carbondata.core.datastorage.store.impl.key.columnar.AbstractColumnarKeyStore;
-import org.carbondata.core.util.CarbonUtil;
-
-public class CompressedColumnarInMemoryStore extends AbstractColumnarKeyStore {
-
-  public CompressedColumnarInMemoryStore(ColumnarKeyStoreInfo columnarStoreInfo,
-      FileHolder fileHolder) {
-    super(columnarStoreInfo, true, fileHolder);
-  }
-
-  @Override public ColumnarKeyStoreDataHolder[] getUnCompressedKeyArray(FileHolder fileHolder,
-      int[] blockIndex, boolean[] needCompressedData, int[] noDictionaryColIndexes) {
-    ColumnarKeyStoreDataHolder[] columnarKeyStoreDataHolders =
-        new ColumnarKeyStoreDataHolder[blockIndex.length];
-    for (int i = 0; i < columnarKeyStoreDataHolders.length; i++) {
-      byte[] columnarKeyBlockDataTemp = null;
-      int[] columnKeyBlockIndex = null;
-      int[] columnKeyBlockReverseIndexes = null;
-      ColumnarKeyStoreMetadata columnarKeyStoreMetadata = null;
-      int columnarKeyBlockIndex = 0;
-      int[] dataIndex = null;
-      boolean isUnCompressed = true;
-      columnarKeyBlockDataTemp = COMPRESSOR.unCompress(columnarKeyBlockData[blockIndex[i]]);
-      boolean isNoDictionaryBlock =
-          CompressedColumnarKeyStoreUtil.isNoDictionaryBlock(noDictionaryColIndexes, blockIndex[i]);
-      if (!isNoDictionaryBlock && this.columnarStoreInfo.getAggKeyBlock()[blockIndex[i]]) {
-        dataIndex = columnarStoreInfo.getNumberCompressor()
-            .unCompress(columnarUniqueblockKeyBlockIndex[mapOfAggDataIndex.get(blockIndex[i])]);
-        if (!needCompressedData[i]) {
-          columnarKeyBlockDataTemp = UnBlockIndexer
-              .uncompressData(columnarKeyBlockDataTemp, dataIndex,
-                  columnarStoreInfo.getSizeOfEachBlock()[blockIndex[i]]);
-          dataIndex = null;
-        } else {
-          isUnCompressed = false;
-        }
-      }
-      if (!columnarStoreInfo.getIsSorted()[blockIndex[i]]) {
-        columnarKeyBlockIndex = mapOfColumnIndexAndColumnBlockIndex.get(blockIndex[i]);
-        columnKeyBlockIndex = CarbonUtil.getUnCompressColumnIndex(
-            columnarStoreInfo.getKeyBlockIndexLength()[columnarKeyBlockIndex],
-            columnarKeyBlockDataIndex[columnarKeyBlockIndex],
-            columnarStoreInfo.getNumberCompressor());
-        columnKeyBlockReverseIndexes = getColumnIndexForNonFilter(columnKeyBlockIndex);
-      }
-      if (isNoDictionaryBlock) {
-        columnarKeyStoreMetadata = new ColumnarKeyStoreMetadata(0);
-        columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
-        columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndexes);
-        columnarKeyStoreMetadata.setNoDictionaryValColumn(true);
-        columnarKeyStoreMetadata.setUnCompressed(true);
-        columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex[i]]);
-        //The system reads the direct surrogates data from a byte array which contains both
-        //the length and the direct surrogates data
-        List<byte[]> noDictionaryValBasedKeyBlockData = CompressedColumnarKeyStoreUtil
-            .readColumnarKeyBlockDataForNoDictionaryCols(columnarKeyBlockDataTemp);
-        columnarKeyStoreDataHolders[i] =
-            new ColumnarKeyStoreDataHolder(noDictionaryValBasedKeyBlockData,
-                columnarKeyStoreMetadata);
-        // move to the next block so the dictionary path below does not
-        // overwrite the no-dictionary holder just created
-        continue;
-      }
-      columnarKeyStoreMetadata =
-          new ColumnarKeyStoreMetadata(columnarStoreInfo.getSizeOfEachBlock()[blockIndex[i]]);
-      columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
-      columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex[i]]);
-      columnarKeyStoreMetadata.setDataIndex(dataIndex);
-      columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndexes);
-      columnarKeyStoreMetadata.setUnCompressed(isUnCompressed);
-      columnarKeyStoreDataHolders[i] =
-          new ColumnarKeyStoreDataHolder(columnarKeyBlockDataTemp, columnarKeyStoreMetadata);
-    }
-    return columnarKeyStoreDataHolders;
-  }
-
-  @Override
-  public ColumnarKeyStoreDataHolder getUnCompressedKeyArray(FileHolder fileHolder, int blockIndex,
-      boolean needCompressedData, int[] noDictionaryVals) {
-
-    byte[] columnarKeyBlockDataTemp = null;
-    int[] columnKeyBlockIndex = null;
-    int[] columnKeyBlockReverseIndex = null;
-    ColumnarKeyStoreMetadata columnarKeyStoreMetadata = null;
-    int columnarKeyBlockIndex = 0;
-    int[] dataIndex = null;
-    boolean isUnCompressed = true;
-    columnarKeyBlockDataTemp = COMPRESSOR.unCompress(columnarKeyBlockData[blockIndex]);
-    boolean isNoDictionaryBlock =
-        CompressedColumnarKeyStoreUtil.isNoDictionaryBlock(noDictionaryVals, blockIndex);
-    if (!isNoDictionaryBlock && this.columnarStoreInfo.getAggKeyBlock()[blockIndex]) {
-      dataIndex = columnarStoreInfo.getNumberCompressor()
-          .unCompress(columnarUniqueblockKeyBlockIndex[mapOfAggDataIndex.get(blockIndex)]);
-      if (!needCompressedData) {
-        columnarKeyBlockDataTemp = UnBlockIndexer
-            .uncompressData(columnarKeyBlockDataTemp, dataIndex,
-                columnarStoreInfo.getSizeOfEachBlock()[blockIndex]);
-        dataIndex = null;
-      } else {
-        isUnCompressed = false;
-      }
-    }
-    if (!columnarStoreInfo.getIsSorted()[blockIndex]) {
-      columnarKeyBlockIndex = mapOfColumnIndexAndColumnBlockIndex.get(blockIndex);
-      columnKeyBlockIndex = CarbonUtil.getUnCompressColumnIndex(
-          columnarStoreInfo.getKeyBlockIndexLength()[columnarKeyBlockIndex],
-          columnarKeyBlockDataIndex[columnarKeyBlockIndex],
-          columnarStoreInfo.getNumberCompressor());
-      columnKeyBlockReverseIndex = getColumnIndexForNonFilter(columnKeyBlockIndex);
-    }
-    if (isNoDictionaryBlock) {
-      return CompressedColumnarKeyStoreUtil
-          .createColumnarKeyStoreMetadataForHCDims(blockIndex, columnarKeyBlockDataTemp,
-              columnKeyBlockIndex, columnKeyBlockReverseIndex, columnarStoreInfo);
-    }
-    columnarKeyStoreMetadata =
-        new ColumnarKeyStoreMetadata(columnarStoreInfo.getSizeOfEachBlock()[blockIndex]);
-    columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
-    columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex]);
-    columnarKeyStoreMetadata.setDataIndex(dataIndex);
-    columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndex);
-    columnarKeyStoreMetadata.setUnCompressed(isUnCompressed);
-    return new ColumnarKeyStoreDataHolder(columnarKeyBlockDataTemp, columnarKeyStoreMetadata);
-
-  }
-
-}

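The in-memory variant above differs from the file-backed one only in when I/O happens: the AbstractColumnarKeyStore constructor has already read every block, so query-time calls decompress cached arrays instead of touching the FileHolder. A minimal sketch of that eager-load idea; BlockSource is a hypothetical stand-in for the file read, not a CarbonData interface.

import java.util.HashMap;
import java.util.Map;

// Sketch of the in-memory strategy: read every block once up front and
// serve later lookups from the cache instead of going back to the file.
final class EagerBlockCache {
  interface BlockSource {
    byte[] read(int blockIndex);
  }

  private final Map<Integer, byte[]> blocks = new HashMap<>();

  EagerBlockCache(int blockCount, BlockSource source) {
    for (int i = 0; i < blockCount; i++) {
      blocks.put(i, source.read(i)); // one-time read, as the constructor does
    }
  }

  byte[] get(int blockIndex) {
    return blocks.get(blockIndex); // no I/O on the query path
  }
}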
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarKeyStoreUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarKeyStoreUtil.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarKeyStoreUtil.java
deleted file mode 100644
index 546eb5b..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/compressed/CompressedColumnarKeyStoreUtil.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.datastorage.store.impl.key.columnar.compressed;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreDataHolder;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreInfo;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreMetadata;
-
-/**
- * Utility class for processing columnar key store blocks.
- */
-public final class CompressedColumnarKeyStoreUtil {
-
-  private CompressedColumnarKeyStoreUtil() {
-
-  }
-
-  /**
-   * @param columnarKeyBlockData
-   * @param columnarKeyStoreMetadata
-   * @return
-   * @author s71955 The high-cardinality dimension rows are sent in a byte
-   * array with each value's data length prepended in the
-   * ColumnarKeyStoreDataHolder byte array, since high-cardinality dimension
-   * data is not part of the MDKey/surrogate keys. This method scans the
-   * byte array and strips the length, which is stored as a short.
-   */
-  public static List<byte[]> readColumnarKeyBlockDataForNoDictionaryCols(
-      byte[] columnarKeyBlockData) {
-    List<byte[]> columnarKeyBlockDataList = new ArrayList<byte[]>(50);
-    // wrap the block directly; no copy is needed just to read it back
-    ByteBuffer noDictionaryValKeyStoreDataHolder = ByteBuffer.wrap(columnarKeyBlockData);
-    while (noDictionaryValKeyStoreDataHolder.hasRemaining()) {
-      short dataLength = noDictionaryValKeyStoreDataHolder.getShort();
-      byte[] noDictionaryValKeyData = new byte[dataLength];
-      noDictionaryValKeyStoreDataHolder.get(noDictionaryValKeyData);
-      columnarKeyBlockDataList.add(noDictionaryValKeyData);
-    }
-    return columnarKeyBlockDataList;
-
-  }
-
-  /**
-   * @param blockIndex
-   * @param columnarKeyBlockData
-   * @param columnKeyBlockIndex
-   * @param columnKeyBlockReverseIndex
-   * @param columnarStoreInfo
-   * @return
-   */
-  public static ColumnarKeyStoreDataHolder createColumnarKeyStoreMetadataForHCDims(int blockIndex,
-      byte[] columnarKeyBlockData, int[] columnKeyBlockIndex, int[] columnKeyBlockReverseIndex,
-      ColumnarKeyStoreInfo columnarStoreInfo) {
-    ColumnarKeyStoreMetadata columnarKeyStoreMetadata;
-    columnarKeyStoreMetadata = new ColumnarKeyStoreMetadata(0);
-    columnarKeyStoreMetadata.setNoDictionaryValColumn(true);
-    columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
-    columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndex);
-    columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex]);
-    columnarKeyStoreMetadata.setUnCompressed(true);
-    List<byte[]> noDictionaryValBasedKeyBlockData = CompressedColumnarKeyStoreUtil
-        .readColumnarKeyBlockDataForNoDictionaryCols(columnarKeyBlockData);
-    return new ColumnarKeyStoreDataHolder(noDictionaryValBasedKeyBlockData,
-        columnarKeyStoreMetadata);
-  }
-
-  /**
-   * This API determines whether the requested block index is a no-dictionary
-   * column index.
-   *
-   * @param noDictionaryColIndexes
-   * @param blockIndex
-   * @return
-   */
-  public static boolean isNoDictionaryBlock(int[] noDictionaryColIndexes, int blockIndex) {
-    if (null != noDictionaryColIndexes) {
-      for (int noDictionaryValIndex : noDictionaryColIndexes) {
-        if (noDictionaryValIndex == blockIndex) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-}

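The byte layout that readColumnarKeyBlockDataForNoDictionaryCols walks is a plain concatenation of <short length><value bytes> pairs. A self-contained round trip of that format (class name and sample values are illustrative only):

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public final class LengthPrefixedBlockDemo {
  public static void main(String[] args) {
    byte[] a = "alpha".getBytes();
    byte[] b = "bravo!".getBytes();
    // writer side: each value is preceded by its length stored in a short
    ByteBuffer block = ByteBuffer.allocate(2 + a.length + 2 + b.length);
    block.putShort((short) a.length).put(a);
    block.putShort((short) b.length).put(b);
    // reader side: the same walk the utility method performs
    ByteBuffer reader = ByteBuffer.wrap(block.array());
    List<byte[]> values = new ArrayList<>();
    while (reader.hasRemaining()) {
      byte[] value = new byte[reader.getShort()];
      reader.get(value);
      values.add(value);
    }
    System.out.println(new String(values.get(0)) + ", " + new String(values.get(1))); // alpha, bravo!
  }
}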
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarFileKeyStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarFileKeyStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarFileKeyStore.java
deleted file mode 100644
index e014135..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarFileKeyStore.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.key.columnar.uncompressed;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreDataHolder;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreInfo;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreMetadata;
-import org.carbondata.core.datastorage.store.columnar.UnBlockIndexer;
-import org.carbondata.core.datastorage.store.impl.key.columnar.AbstractColumnarKeyStore;
-import org.carbondata.core.util.CarbonUtil;
-
-public class UnCompressedColumnarFileKeyStore extends AbstractColumnarKeyStore {
-
-  public UnCompressedColumnarFileKeyStore(ColumnarKeyStoreInfo columnarStoreInfo) {
-    super(columnarStoreInfo, false, null);
-  }
-
-  @Override public ColumnarKeyStoreDataHolder[] getUnCompressedKeyArray(FileHolder fileHolder,
-      int[] blockIndex, boolean[] needCompressedData, int[] noDictionaryColIndexes) {
-    ColumnarKeyStoreDataHolder[] columnarKeyStoreDataHolders =
-        new ColumnarKeyStoreDataHolder[blockIndex.length];
-    for (int j = 0; j < columnarKeyStoreDataHolders.length; j++) {
-      // per-block state is declared inside the loop so stale values from a
-      // previous block cannot leak into blocks that skip these branches
-      byte[] columnarKeyBlockData;
-      int[] columnKeyBlockIndex = null;
-      ColumnarKeyStoreMetadata columnarKeyStoreMetadata;
-      int columnarKeyBlockIndex;
-      int[] dataIndex = null;
-      int[] columnKeyBlockReverseIndex = null;
-      columnarKeyBlockData = fileHolder.readByteArray(columnarStoreInfo.getFilePath(),
-          columnarStoreInfo.getKeyBlockOffsets()[blockIndex[j]],
-          columnarStoreInfo.getKeyBlockLengths()[blockIndex[j]]);
-      if (this.columnarStoreInfo.getAggKeyBlock()[blockIndex[j]]) {
-        dataIndex = columnarStoreInfo.getNumberCompressor().unCompress(fileHolder
-            .readByteArray(columnarStoreInfo.getFilePath(),
-                columnarStoreInfo.getDataIndexMapOffsets()[mapOfAggDataIndex.get(blockIndex[j])],
-                columnarStoreInfo.getDataIndexMapLength()[mapOfAggDataIndex.get(blockIndex[j])]));
-        if (!needCompressedData[j]) {
-          columnarKeyBlockData = UnBlockIndexer.uncompressData(columnarKeyBlockData, dataIndex,
-              columnarStoreInfo.getSizeOfEachBlock()[blockIndex[j]]);
-          dataIndex = null;
-        }
-      }
-      if (!columnarStoreInfo.getIsSorted()[blockIndex[j]]) {
-        columnarKeyBlockIndex = mapOfColumnIndexAndColumnBlockIndex.get(blockIndex[j]);
-        columnKeyBlockIndex = CarbonUtil.getUnCompressColumnIndex(
-            columnarStoreInfo.getKeyBlockIndexLength()[columnarKeyBlockIndex], fileHolder
-                .readByteArray(columnarStoreInfo.getFilePath(),
-                    columnarStoreInfo.getKeyBlockIndexOffsets()[columnarKeyBlockIndex],
-                    columnarStoreInfo.getKeyBlockIndexLength()[columnarKeyBlockIndex]),
-            columnarStoreInfo.getNumberCompressor());
-        columnKeyBlockReverseIndex = getColumnIndexForNonFilter(columnKeyBlockIndex);
-      }
-      columnarKeyStoreMetadata =
-          new ColumnarKeyStoreMetadata(columnarStoreInfo.getSizeOfEachBlock()[blockIndex[j]]);
-      columnarKeyStoreMetadata.setSorted(columnarStoreInfo.getIsSorted()[blockIndex[j]]);
-      columnarKeyStoreMetadata.setColumnIndex(columnKeyBlockIndex);
-      columnarKeyStoreMetadata.setDataIndex(dataIndex);
-      columnarKeyStoreMetadata.setColumnReverseIndex(columnKeyBlockReverseIndex);
-      columnarKeyStoreDataHolders[j] =
-          new ColumnarKeyStoreDataHolder(columnarKeyBlockData, columnarKeyStoreMetadata);
-    }
-    return columnarKeyStoreDataHolders;
-  }
-
-  @Override
-  public ColumnarKeyStoreDataHolder getUnCompressedKeyArray(FileHolder fileHolder, int blockIndex,
-      boolean needCompressedData, int[] noDictionaryColIndexes) {
-    // single-block reads are not implemented for this store
-    return null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarInMemoryStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarInMemoryStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarInMemoryStore.java
deleted file mode 100644
index e4b565b..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/columnar/uncompressed/UnCompressedColumnarInMemoryStore.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.key.columnar.uncompressed;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreDataHolder;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreInfo;
-import org.carbondata.core.datastorage.store.columnar.ColumnarKeyStoreMetadata;
-import org.carbondata.core.datastorage.store.impl.key.columnar.AbstractColumnarKeyStore;
-import org.carbondata.core.util.CarbonUtil;
-
-public class UnCompressedColumnarInMemoryStore extends AbstractColumnarKeyStore {
-
-  public UnCompressedColumnarInMemoryStore(ColumnarKeyStoreInfo columnarStoreInfo,
-      FileHolder fileHolder) {
-    super(columnarStoreInfo, true, fileHolder);
-  }
-
-  @Override public ColumnarKeyStoreDataHolder[] getUnCompressedKeyArray(FileHolder fileHolder,
-      int[] blockIndex, boolean[] needCompressedData, int[] noDictionaryColIndexes) {
-    int columnarKeyBlockIndex = 0;
-    int[] columnIndex = null;
-    ColumnarKeyStoreDataHolder[] columnarKeyStoreDataHolders =
-        new ColumnarKeyStoreDataHolder[blockIndex.length];
-    ColumnarKeyStoreMetadata columnarKeyStoreMetadataTemp = null;
-    for (int i = 0; i < columnarKeyStoreDataHolders.length; i++) {
-      columnarKeyStoreMetadataTemp = new ColumnarKeyStoreMetadata(0);
-      if (!columnarStoreInfo.getIsSorted()[blockIndex[i]]) {
-        columnarKeyBlockIndex = mapOfColumnIndexAndColumnBlockIndex.get(blockIndex[i]);
-        columnIndex = CarbonUtil.getUnCompressColumnIndex(
-            columnarStoreInfo.getKeyBlockIndexLength()[columnarKeyBlockIndex], fileHolder
-                .readByteArray(columnarStoreInfo.getFilePath(),
-                    columnarStoreInfo.getKeyBlockIndexOffsets()[columnarKeyBlockIndex],
-                    columnarStoreInfo.getKeyBlockIndexLength()[columnarKeyBlockIndex]),
-            columnarStoreInfo.getNumberCompressor());
-        columnIndex = getColumnIndexForNonFilter(columnIndex);
-        columnarKeyStoreMetadataTemp.setColumnIndex(columnIndex);
-      }
-      columnarKeyStoreMetadataTemp.setSorted(columnarStoreInfo.getIsSorted()[blockIndex[i]]);
-      columnarKeyStoreDataHolders[i] =
-          new ColumnarKeyStoreDataHolder(columnarKeyBlockData[blockIndex[i]],
-              columnarKeyStoreMetadataTemp);
-    }
-    return columnarKeyStoreDataHolders;
-  }
-
-  @Override
-  public ColumnarKeyStoreDataHolder getUnCompressedKeyArray(FileHolder fileHolder, int blockIndex,
-      boolean needCompressedData, int[] noDictionaryVals) {
-    // single-block reads are not implemented for this store
-    return null;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/compressed/AbstractCompressedSingleArrayStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/compressed/AbstractCompressedSingleArrayStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/compressed/AbstractCompressedSingleArrayStore.java
deleted file mode 100644
index e90ad4e..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/compressed/AbstractCompressedSingleArrayStore.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.key.compressed;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.NodeKeyStore;
-import org.carbondata.core.datastorage.store.compression.Compressor;
-import org.carbondata.core.datastorage.store.compression.SnappyCompression;
-
-public abstract class AbstractCompressedSingleArrayStore implements NodeKeyStore {
-
-  /**
-   * compressor will be used to compress the data
-   */
-  protected static final Compressor<byte[]> COMPRESSOR =
-      SnappyCompression.SnappyByteCompression.INSTANCE;
-  /**
-   * size of each element
-   */
-  protected final int sizeOfEachElement;
-  /**
-   * data store which will hold the data
-   */
-  protected byte[] datastore;
-  /**
-   * total number of elements
-   */
-  protected int totalNumberOfElements;
-
-  public AbstractCompressedSingleArrayStore(int size, int elementSize) {
-    this(size, elementSize, true);
-  }
-
-  public AbstractCompressedSingleArrayStore(int size, int elementSize, boolean createDataStore) {
-    this.sizeOfEachElement = elementSize;
-    this.totalNumberOfElements = size;
-    if (createDataStore) {
-      datastore = new byte[this.totalNumberOfElements * this.sizeOfEachElement];
-    }
-  }
-
-  /**
-   * This method will be used to insert a key into the store
-   */
-  @Override public void put(int index, byte[] value) {
-    System.arraycopy(value, 0, datastore, ((index) * sizeOfEachElement), sizeOfEachElement);
-  }
-
-  /**
-   * This method will be used to get the writable key array.
-   * The writable key array holds the following information:
-   * <size of key array><key array>
-   * The total length is 4 bytes (for the array size) plus the key array length.
-   *
-   * @return writable array (compressed or normal)
-   */
-  @Override public byte[] getWritableKeyArray() {
-    // compress the data store
-    byte[] compressedKeys = COMPRESSOR.compress(datastore);
-    return compressedKeys;
-  }
-
-  /**
-   * This method will be used to get the actual key array present in the
-   * store.
-   * Here the backing array is the uncompressed array.
-   *
-   * @param fileHolder file holder will be used to read the file
-   * @return uncompressed keys
-   * will return uncompressed key
-   */
-  @Override public byte[] getBackArray(FileHolder fileHolder) {
-    return COMPRESSOR.unCompress(datastore);
-  }
-
-  /**
-   * This method will be used to get the key array based on index
-   *
-   * @param index      index in store
-   * @param fileHolder file holder will be used to read the file
-   * @return key
-   */
-  @Override public byte[] get(int index, FileHolder fileHolder) {
-    // uncompress the store data
-    byte[] unCompress = COMPRESSOR.unCompress(datastore);
-    // create new array of size of each element
-    byte[] copy = new byte[sizeOfEachElement];
-    // copy the array for the given index
-    // the copy is done based on the calculation below,
-    // e.g. if the index is 4 and each key is 6 bytes, copy from the 24th (6*4)
-    // byte up to the 29th byte
-    System.arraycopy(unCompress, ((index) * sizeOfEachElement), copy, 0, sizeOfEachElement);
-    return copy;
-  }
-
-  /**
-   * This method will clear the store and create the new empty store
-   */
-  @Override public void clear() {
-    datastore = new byte[this.totalNumberOfElements * this.sizeOfEachElement];
-  }
-}

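All of the single-array key stores rely on the same fixed-width arithmetic: slot i occupies bytes [i * sizeOfEachElement, (i + 1) * sizeOfEachElement). A small worked example matching the 6 * 4 = 24th-to-29th-byte case described in the comments above (class name and sample key are illustrative only):

public final class FixedWidthSlotDemo {
  public static void main(String[] args) {
    int elementSize = 6;
    byte[] store = new byte[10 * elementSize];
    byte[] key = {1, 2, 3, 4, 5, 6};
    int index = 4;
    // put: slot 4 occupies bytes 24..29 of the backing array
    System.arraycopy(key, 0, store, index * elementSize, elementSize);
    // get: the same arithmetic recovers exactly those bytes
    byte[] copy = new byte[elementSize];
    System.arraycopy(store, index * elementSize, copy, 0, elementSize);
    System.out.println(java.util.Arrays.equals(key, copy)); // true
  }
}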
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyFileStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyFileStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyFileStore.java
deleted file mode 100644
index cd3487d..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyFileStore.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.key.compressed;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-
-public class CompressedSingleArrayKeyFileStore extends AbstractCompressedSingleArrayStore {
-
-  /**
-   * offset, this will be used for seek position
-   */
-  private long offset;
-
-  /**
-   * fully qualified file path
-   */
-  private String filePath;
-
-  /**
-   * length to be read
-   */
-  private int length;
-
-  public CompressedSingleArrayKeyFileStore(int size, int elementSize, long offset, String filePath,
-      int length) {
-    super(size, elementSize, false);
-    this.offset = offset;
-    this.filePath = filePath;
-    this.length = length;
-  }
-
-  /**
-   * This method will be used to get the actual keys array present in the
-   * store. Here the backing array is the uncompressed array. This method first reads
-   * the data from the file based on offset and length, then uncompresses the array
-   * to get the actual array.
-   *
-   * @param fileHolder file holder will be used to read the file
-   * @return uncompressed keys
-   */
-  @Override public byte[] getBackArray(FileHolder fileHolder) {
-    if (null != fileHolder) {
-      // read from the file based on offset and length; the file holder reads
-      // that many bytes from that offset, then uncompress and return
-      return COMPRESSOR.unCompress(fileHolder.readByteArray(filePath, offset, length));
-    } else {
-      return new byte[0];
-    }
-  }
-
-  /**
-   * This method will be used to get the key array based on index
-   * This method will first read
-   * the data from file based on offset and length then uncompress the array
-   * to get the actual array, then get the array for index and return
-   *
-   * @param index      index in store
-   * @param fileHolder file holder will be used to read the file
-   * @return key
-   */
-  @Override public byte[] get(int index, FileHolder fileHolder) {
-    // read from the file based on offset and length; the file holder reads
-    // that many bytes from that offset, then uncompress to get the actual array
-    byte[] unCompress = COMPRESSOR.unCompress(fileHolder.readByteArray(filePath, offset, length));
-    // create new array of size of each element
-    byte[] copy = new byte[sizeOfEachElement];
-    // copy the array for the given index
-    // the copy is done based on the calculation below,
-    // e.g. if the index is 4 and each key is 6 bytes, copy from the 24th (6*4)
-    // byte up to the 29th byte
-    System.arraycopy(unCompress, ((index) * sizeOfEachElement), copy, 0, sizeOfEachElement);
-    return copy;
-  }
-}

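The file-backed stores defer all I/O to FileHolder.readByteArray(filePath, offset, length). That interface's implementation is not part of this diff, so the following is only a plausible sketch of the primitive using java.io.RandomAccessFile, not the project's actual FileHolder:

import java.io.IOException;
import java.io.RandomAccessFile;

// Hypothetical stand-in for FileHolder.readByteArray: seek to the block's
// offset and read exactly `length` bytes, as the file-backed key stores expect.
final class BlockReader {
  static byte[] readByteArray(String filePath, long offset, int length) throws IOException {
    try (RandomAccessFile file = new RandomAccessFile(filePath, "r")) {
      byte[] buffer = new byte[length];
      file.seek(offset);
      file.readFully(buffer);
      return buffer;
    }
  }
}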
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyInMemoryStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyInMemoryStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyInMemoryStore.java
deleted file mode 100644
index f2d8699..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/compressed/CompressedSingleArrayKeyInMemoryStore.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.key.compressed;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-
-public class CompressedSingleArrayKeyInMemoryStore extends AbstractCompressedSingleArrayStore {
-  /**
-   * @param size
-   * @param elementSize
-   */
-  public CompressedSingleArrayKeyInMemoryStore(int size, int elementSize) {
-    super(size, elementSize);
-  }
-
-  /**
-   * @param size
-   * @param elementSize
-   * @param offset
-   * @param filePath
-   * @param fileHolder
-   * @param length
-   */
-  public CompressedSingleArrayKeyInMemoryStore(int size, int elementSize, long offset,
-      String filePath, FileHolder fileHolder, int length) {
-    this(size, elementSize);
-    datastore = fileHolder.readByteArray(filePath, offset, length);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/uncompressed/AbstractSingleArrayKeyStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/uncompressed/AbstractSingleArrayKeyStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/uncompressed/AbstractSingleArrayKeyStore.java
deleted file mode 100644
index 22b9f6e..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/uncompressed/AbstractSingleArrayKeyStore.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.key.uncompressed;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.NodeKeyStore;
-
-public abstract class AbstractSingleArrayKeyStore implements NodeKeyStore {
-
-  /**
-   * size of each element
-   */
-  protected final int sizeOfEachElement;
-  /**
-   * total number of elements
-   */
-  protected final int totalNumberOfElements;
-  /**
-   * data store which will hold the data
-   */
-  protected byte[] datastore;
-
-  public AbstractSingleArrayKeyStore(int size, int elementSize) {
-    this.sizeOfEachElement = elementSize;
-    this.totalNumberOfElements = size;
-    datastore = new byte[size * elementSize];
-  }
-
-  /**
-   * This method will be used to insert an mdkey into the store
-   *
-   * @param index index of mdkey
-   * @param value mdkey
-   */
-  @Override public void put(int index, byte[] value) {
-    System.arraycopy(value, 0, datastore, ((index) * sizeOfEachElement), sizeOfEachElement);
-  }
-
-  /**
-   * This method will be used to get the writable key array.
-   * For this uncompressed store, the backing array is returned as-is.
-   *
-   * @return writable array
-   */
-  @Override public byte[] getWritableKeyArray() {
-    // the store is uncompressed, so the backing array is returned as-is
-    return datastore;
-  }
-
-  /**
-   * This method will be used to get the actual key array present in the
-   * store.
-   *
-   * @param fileHolder file holder will be used to read the file
-   * @return uncompressed keys
-   */
-  @Override public byte[] getBackArray(FileHolder fileHolder) {
-    return datastore;
-  }
-
-  /**
-   * This method will be used to get the key array based on index
-   *
-   * @param index      index in store
-   * @param fileHolder file holder will be used to read the file
-   * @return key
-   */
-  @Override public byte[] get(int index, FileHolder fileHolder) {
-    // create new array of size of each element
-    byte[] copy = new byte[sizeOfEachElement];
-
-    // copy the array for the given index
-    // the copy is done based on the calculation below,
-    // e.g. if the index is 4 and each key is 6 bytes, copy from the 24th (6*4)
-    // byte up to the 29th byte
-    System.arraycopy(datastore, ((index) * sizeOfEachElement), copy, 0, sizeOfEachElement);
-    return copy;
-  }
-
-  /**
-   * This method will clear the store and create the new empty store
-   */
-  @Override public void clear() {
-    datastore = new byte[this.totalNumberOfElements * this.sizeOfEachElement];
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyFileStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyFileStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyFileStore.java
deleted file mode 100644
index 2e73484..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyFileStore.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.key.uncompressed;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-
-public class SingleArrayKeyFileStore extends AbstractSingleArrayKeyStore {
-  /**
-   * offset, this will be used for seek position
-   */
-  private long offset;
-
-  /**
-   * fully qualified file path
-   */
-  private String filePath;
-
-  /**
-   * length to be read
-   */
-  private int length;
-
-  /**
-   * @param size
-   * @param elementSize
-   */
-  public SingleArrayKeyFileStore(int size, int elementSize) {
-    super(size, elementSize);
-  }
-
-  /**
-   * @param size
-   * @param elementSize
-   * @param offset
-   * @param filePath
-   * @param length
-   */
-  public SingleArrayKeyFileStore(int size, int elementSize, long offset, String filePath,
-      int length) {
-    this(size, elementSize);
-    this.offset = offset;
-    this.filePath = filePath;
-    this.length = length;
-    datastore = null;
-  }
-
-  /**
-   * This method will be used to get the actual keys array present in the
-   * store. It reads the data from the file based on offset and length
-   * and returns the bytes read.
-   *
-   * @param fileHolder file holder will be used to read the file
-   * @return uncompressed keys
-   */
-  @Override public byte[] getBackArray(FileHolder fileHolder) {
-    if (null != fileHolder) {
-      return fileHolder.readByteArray(filePath, offset, length);
-    } else {
-      return new byte[0];
-    }
-  }
-
-  /**
-   * This method will be used to get the key array based on index This method
-   * will first read the data from file based on offset and length then get
-   * the array for index and return
-   *
-   * @param index      index in store
-   * @param fileHolder file holder will be used to read the file
-   * @return key
-   */
-  @Override public byte[] get(int index, FileHolder fileHolder) {
-    // read from the file based on offset and length; the file holder reads
-    // that many bytes from that offset
-    byte[] data = fileHolder.readByteArray(filePath, offset, length);
-    // create new array of size of each element
-    byte[] copy = new byte[sizeOfEachElement];
-    // copy the array for the given index
-    // the copy is done based on the calculation below,
-    // e.g. if the index is 4 and each key is 6 bytes, copy from the 24th (6*4)
-    // byte up to the 29th byte
-    System.arraycopy(data, ((index) * sizeOfEachElement), copy, 0, sizeOfEachElement);
-    return copy;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyInMemoryStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyInMemoryStore.java b/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyInMemoryStore.java
deleted file mode 100644
index 96ab337..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/store/impl/key/uncompressed/SingleArrayKeyInMemoryStore.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.store.impl.key.uncompressed;
-
-import org.carbondata.core.datastorage.store.FileHolder;
-
-public class SingleArrayKeyInMemoryStore extends AbstractSingleArrayKeyStore {
-
-  public SingleArrayKeyInMemoryStore(int size, int elementSize) {
-    super(size, elementSize);
-  }
-
-  public SingleArrayKeyInMemoryStore(int size, int elementSize, long offset, String filePath,
-      FileHolder fileHolder, int length) {
-    this(size, elementSize);
-    datastore = fileHolder.readByteArray(filePath, offset, length);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/datastorage/util/StoreFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/util/StoreFactory.java b/core/src/main/java/org/carbondata/core/datastorage/util/StoreFactory.java
deleted file mode 100644
index e8efa21..0000000
--- a/core/src/main/java/org/carbondata/core/datastorage/util/StoreFactory.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.datastorage.util;
-
-import org.carbondata.core.datastorage.store.NodeMeasureDataStore;
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.datastorage.store.impl.data.compressed.HeavyCompressedDoubleArrayDataInMemoryStore;
-import org.carbondata.core.datastorage.store.impl.data.uncompressed.DoubleArrayDataInMemoryStore;
-
-public final class StoreFactory {
-  /**
-   * value type.
-   */
-  private static StoreType valueType;
-
-  static {
-    valueType = StoreType.HEAVY_VALUE_COMPRESSION;
-  }
-
-  private StoreFactory() {
-
-  }
-
-  public static NodeMeasureDataStore createDataStore(ValueCompressionModel compressionModel) {
-    switch (valueType) {
-      case COMPRESSED_DOUBLE_ARRAY:
-        return new DoubleArrayDataInMemoryStore(compressionModel);
-
-      case HEAVY_VALUE_COMPRESSION:
-        return new HeavyCompressedDoubleArrayDataInMemoryStore(compressionModel);
-      default:
-        return new HeavyCompressedDoubleArrayDataInMemoryStore(compressionModel);
-    }
-  }
-
-  /**
-   * Supported store types.
-   */
-  public enum StoreType {
-    COMPRESSED_SINGLE_ARRAY,
-    COMPRESSED_DOUBLE_ARRAY,
-    HEAVY_VALUE_COMPRESSION
-  }
-
-}

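A usage sketch for the factory above (not part of this commit); it assumes a
ValueCompressionModel already prepared by the data-load flow:

    static NodeMeasureDataStore newMeasureStore(ValueCompressionModel compressionModel) {
      // valueType defaults to HEAVY_VALUE_COMPRESSION, so this returns a
      // HeavyCompressedDoubleArrayDataInMemoryStore
      return StoreFactory.createDataStore(compressionModel);
    }
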
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/keygenerator/KeyGenException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/keygenerator/KeyGenException.java b/core/src/main/java/org/carbondata/core/keygenerator/KeyGenException.java
deleted file mode 100644
index 56d5def..0000000
--- a/core/src/main/java/org/carbondata/core/keygenerator/KeyGenException.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.keygenerator;
-
-/**
- * It can be thrown while generating the key.
- */
-public class KeyGenException extends Exception {
-
-  private static final long serialVersionUID = 3105132151795358241L;
-
-  public KeyGenException() {
-    super();
-  }
-
-  public KeyGenException(Exception e) {
-    super(e);
-  }
-
-  public KeyGenException(Exception e, String msg) {
-    super(msg, e);
-  }
-
-  public KeyGenException(String msg) {
-    super(msg);
-  }
-
-}

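A small handling sketch for the exception above (not part of this commit); the
failing key-generation step is hypothetical:

    try {
      throw new KeyGenException("cardinality exceeds key length"); // hypothetical failure
    } catch (KeyGenException e) {
      // callers typically log and abort the current load/query step
      System.err.println(e.getMessage());
    }
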


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/AbstractDFSCarbonFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/AbstractDFSCarbonFile.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/AbstractDFSCarbonFile.java
new file mode 100644
index 0000000..26af405
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/AbstractDFSCarbonFile.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.filesystem;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public abstract class AbstractDFSCarbonFile implements CarbonFile {
+  /**
+   * LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(AbstractDFSCarbonFile.class.getName());
+  protected FileStatus fileStatus;
+  protected FileSystem fs;
+
+  public AbstractDFSCarbonFile(String filePath) {
+    filePath = filePath.replace("\\", "/");
+    Path path = new Path(filePath);
+    try {
+      fs = path.getFileSystem(FileFactory.getConfiguration());
+      fileStatus = fs.getFileStatus(path);
+    } catch (IOException e) {
+      LOGGER.error("Exception occured:" + e.getMessage());
+    }
+  }
+
+  public AbstractDFSCarbonFile(Path path) {
+    try {
+      fs = path.getFileSystem(FileFactory.getConfiguration());
+      fileStatus = fs.getFileStatus(path);
+    } catch (IOException e) {
+      LOGGER.error("Exception occured:" + e.getMessage());
+    }
+  }
+
+  public AbstractDFSCarbonFile(FileStatus fileStatus) {
+    this.fileStatus = fileStatus;
+  }
+
+  @Override public boolean createNewFile() {
+    Path path = fileStatus.getPath();
+    try {
+      return fs.createNewFile(path);
+    } catch (IOException e) {
+      return false;
+    }
+  }
+
+  @Override public String getAbsolutePath() {
+    return fileStatus.getPath().toString();
+  }
+
+  @Override public String getName() {
+    return fileStatus.getPath().getName();
+  }
+
+  @Override public boolean isDirectory() {
+    return fileStatus.isDirectory();
+  }
+
+  @Override public boolean exists() {
+    try {
+      if (null != fileStatus) {
+        fs = fileStatus.getPath().getFileSystem(FileFactory.getConfiguration());
+        return fs.exists(fileStatus.getPath());
+      }
+    } catch (IOException e) {
+      LOGGER.error("Exception occured:" + e.getMessage());
+    }
+    return false;
+  }
+
+  @Override public String getCanonicalPath() {
+    return getAbsolutePath();
+  }
+
+  @Override public String getPath() {
+    return getAbsolutePath();
+  }
+
+  @Override public long getSize() {
+    return fileStatus.getLen();
+  }
+
+  public boolean renameTo(String changetoName) {
+    FileSystem fs;
+    try {
+      fs = fileStatus.getPath().getFileSystem(FileFactory.getConfiguration());
+      return fs.rename(fileStatus.getPath(), new Path(changetoName));
+    } catch (IOException e) {
+      LOGGER.error("Exception occured:" + e.getMessage());
+      return false;
+    }
+  }
+
+  public boolean delete() {
+    FileSystem fs;
+    try {
+      fs = fileStatus.getPath().getFileSystem(FileFactory.getConfiguration());
+      return fs.delete(fileStatus.getPath(), true);
+    } catch (IOException e) {
+      LOGGER.error("Exception occured:" + e.getMessage());
+      return false;
+    }
+  }
+
+  @Override public long getLastModifiedTime() {
+    return fileStatus.getModificationTime();
+  }
+
+  @Override public boolean setLastModifiedTime(long timestamp) {
+    try {
+      fs.setTimes(fileStatus.getPath(), timestamp, timestamp);
+    } catch (IOException e) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * This method will delete the data in the file from a given offset
+   */
+  @Override public boolean truncate(String fileName, long validDataEndOffset) {
+    DataOutputStream dataOutputStream = null;
+    DataInputStream dataInputStream = null;
+    boolean fileTruncatedSuccessfully = false;
+    // if the bytes to read are less than 1024, the buffer size should equal the given offset
+    int bufferSize = validDataEndOffset > CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR ?
+        CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR :
+        (int) validDataEndOffset;
+    // temporary file name
+    String tempWriteFilePath = fileName + CarbonCommonConstants.TEMPWRITEFILEEXTENSION;
+    FileFactory.FileType fileType = FileFactory.getFileType(fileName);
+    try {
+      CarbonFile tempFile = null;
+      // delete temporary file if it already exists at a given path
+      if (FileFactory.isFileExist(tempWriteFilePath, fileType)) {
+        tempFile = FileFactory.getCarbonFile(tempWriteFilePath, fileType);
+        tempFile.delete();
+      }
+      // create new temporary file
+      FileFactory.createNewFile(tempWriteFilePath, fileType);
+      tempFile = FileFactory.getCarbonFile(tempWriteFilePath, fileType);
+      byte[] buff = new byte[bufferSize];
+      dataInputStream = FileFactory.getDataInputStream(fileName, fileType);
+      // read the data
+      int read = dataInputStream.read(buff, 0, buff.length);
+      dataOutputStream = FileFactory.getDataOutputStream(tempWriteFilePath, fileType);
+      dataOutputStream.write(buff, 0, read);
+      long remaining = validDataEndOffset - read;
+      // never read past the valid data end offset
+      while (remaining > 0) {
+        if (remaining > bufferSize) {
+          buff = new byte[bufferSize];
+        } else {
+          buff = new byte[(int) remaining];
+        }
+        read = dataInputStream.read(buff, 0, buff.length);
+        dataOutputStream.write(buff, 0, read);
+        remaining = remaining - read;
+      }
+      CarbonUtil.closeStreams(dataInputStream, dataOutputStream);
+      // rename the temp file to original file
+      tempFile.renameForce(fileName);
+      fileTruncatedSuccessfully = true;
+    } catch (IOException e) {
+      LOGGER.error("Exception occured while truncating the file " + e.getMessage());
+    } finally {
+      CarbonUtil.closeStreams(dataOutputStream, dataInputStream);
+    }
+    return fileTruncatedSuccessfully;
+  }
+
+  /**
+   * This method will be used to check whether a file has been modified or not
+   *
+   * @param fileTimeStamp time to be compared with latest timestamp of file
+   * @param endOffset     file length to be compared with current length of file
+   * @return true if the file has been modified, false otherwise
+   */
+  @Override public boolean isFileModified(long fileTimeStamp, long endOffset) {
+    boolean isFileModified = false;
+    if (getLastModifiedTime() > fileTimeStamp || getSize() > endOffset) {
+      isFileModified = true;
+    }
+    return isFileModified;
+  }
+}

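A hedged usage sketch of the truncate flow above (not part of this commit; the
path and offset are hypothetical). Internally it copies the first
validDataEndOffset bytes to a temp file and renames it over the original:

    CarbonFile file = FileFactory.getCarbonFile("hdfs://nn/carbon/part-0", // hypothetical
        FileFactory.FileType.HDFS);
    // keep only the first 1024 valid bytes
    boolean truncated = file.truncate("hdfs://nn/carbon/part-0", 1024L);
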
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/CarbonFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/CarbonFile.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/CarbonFile.java
new file mode 100644
index 0000000..642055b
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/CarbonFile.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.filesystem;
+
+public interface CarbonFile {
+
+  String getAbsolutePath();
+
+  CarbonFile[] listFiles(CarbonFileFilter fileFilter);
+
+  CarbonFile[] listFiles();
+
+  String getName();
+
+  boolean isDirectory();
+
+  boolean exists();
+
+  String getCanonicalPath();
+
+  CarbonFile getParentFile();
+
+  String getPath();
+
+  long getSize();
+
+  boolean renameTo(String changetoName);
+
+  boolean renameForce(String changetoName);
+
+  boolean delete();
+
+  boolean createNewFile();
+
+  long getLastModifiedTime();
+
+  boolean setLastModifiedTime(long timestamp);
+
+  boolean truncate(String fileName, long validDataEndOffset);
+
+  /**
+   * This method will be used to check whether a file has been modified or not
+   *
+   * @param fileTimeStamp time to be compared with latest timestamp of file
+   * @param endOffset     file length to be compared with current length of file
+   * @return true if the file has been modified, false otherwise
+   */
+  boolean isFileModified(long fileTimeStamp, long endOffset);
+}

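A consumption sketch for the interface above (not part of this commit), going
through FileFactory; the directory path is hypothetical:

    CarbonFile dir = FileFactory.getCarbonFile("/tmp/carbonstore", // hypothetical
        FileFactory.getFileType("/tmp/carbonstore"));
    if (dir.isDirectory()) {
      for (CarbonFile f : dir.listFiles()) {
        System.out.println(f.getName() + " -> " + f.getSize() + " bytes");
      }
    }
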
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/CarbonFileFilter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/CarbonFileFilter.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/CarbonFileFilter.java
new file mode 100644
index 0000000..7db3b2b
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/CarbonFileFilter.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.filesystem;
+
+public interface CarbonFileFilter {
+  boolean accept(CarbonFile file);
+}

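A filter sketch using the interface above (not part of this commit); dir is the
CarbonFile from the previous sketch and the .carbondata extension is an
assumption:

    CarbonFile[] dataFiles = dir.listFiles(new CarbonFileFilter() {
      @Override public boolean accept(CarbonFile file) {
        return file.getName().endsWith(".carbondata"); // assumed extension
      }
    });
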
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/HDFSCarbonFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/HDFSCarbonFile.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/HDFSCarbonFile.java
new file mode 100644
index 0000000..ebe18e4
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/HDFSCarbonFile.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.filesystem;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+
+public class HDFSCarbonFile extends AbstractDFSCarbonFile {
+  /**
+   * LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(HDFSCarbonFile.class.getName());
+
+  public HDFSCarbonFile(String filePath) {
+    super(filePath);
+  }
+
+  public HDFSCarbonFile(Path path) {
+    super(path);
+  }
+
+  public HDFSCarbonFile(FileStatus fileStatus) {
+    super(fileStatus);
+  }
+
+  /**
+   * @param listStatus file statuses to wrap
+   * @return the statuses wrapped as HDFSCarbonFile instances
+   */
+  private CarbonFile[] getFiles(FileStatus[] listStatus) {
+    if (listStatus == null) {
+      return new CarbonFile[0];
+    }
+    CarbonFile[] files = new CarbonFile[listStatus.length];
+    for (int i = 0; i < files.length; i++) {
+      files[i] = new HDFSCarbonFile(listStatus[i]);
+    }
+    return files;
+  }
+
+  @Override
+  public CarbonFile[] listFiles() {
+    FileStatus[] listStatus = null;
+    try {
+      if (null != fileStatus && fileStatus.isDirectory()) {
+        Path path = fileStatus.getPath();
+        listStatus = path.getFileSystem(FileFactory.getConfiguration()).listStatus(path);
+      } else {
+        return null;
+      }
+    } catch (IOException e) {
+      LOGGER.error("Exception occured: " + e.getMessage());
+      return new CarbonFile[0];
+    }
+    return getFiles(listStatus);
+  }
+
+  @Override
+  public CarbonFile[] listFiles(final CarbonFileFilter fileFilter) {
+    CarbonFile[] files = listFiles();
+    if (files != null && files.length >= 1) {
+      List<CarbonFile> fileList = new ArrayList<CarbonFile>(files.length);
+      for (int i = 0; i < files.length; i++) {
+        if (fileFilter.accept(files[i])) {
+          fileList.add(files[i]);
+        }
+      }
+      if (fileList.size() >= 1) {
+        return fileList.toArray(new CarbonFile[fileList.size()]);
+      } else {
+        return new CarbonFile[0];
+      }
+    }
+    return files;
+  }
+
+  @Override
+  public CarbonFile getParentFile() {
+    Path parent = fileStatus.getPath().getParent();
+    return null == parent ? null : new HDFSCarbonFile(parent);
+  }
+
+  @Override
+  public boolean renameForce(String changetoName) {
+    FileSystem fs;
+    try {
+      fs = fileStatus.getPath().getFileSystem(FileFactory.getConfiguration());
+      if (fs instanceof DistributedFileSystem) {
+        ((DistributedFileSystem) fs).rename(fileStatus.getPath(), new Path(changetoName),
+            org.apache.hadoop.fs.Options.Rename.OVERWRITE);
+        return true;
+      } else {
+        return false;
+      }
+    } catch (IOException e) {
+      LOGGER.error("Exception occured: " + e.getMessage());
+      return false;
+    }
+  }
+}
\ No newline at end of file

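A rename sketch (not part of this commit; paths hypothetical): on HDFS,
renameForce overwrites atomically via Options.Rename.OVERWRITE, unlike the
ViewFS variant further below:

    CarbonFile tmp = FileFactory.getCarbonFile("hdfs://nn/carbon/file.tmp", // hypothetical
        FileFactory.FileType.HDFS);
    // atomically replaces the destination if it already exists
    boolean renamed = tmp.renameForce("hdfs://nn/carbon/file");
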
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/LocalCarbonFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/LocalCarbonFile.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/LocalCarbonFile.java
new file mode 100644
index 0000000..f46aeed
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/LocalCarbonFile.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.filesystem;
+
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+import org.apache.hadoop.fs.Path;
+
+public class LocalCarbonFile implements CarbonFile {
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(LocalCarbonFile.class.getName());
+  private File file;
+
+  public LocalCarbonFile(String filePath) {
+    Path pathWithoutSchemeAndAuthority = Path.getPathWithoutSchemeAndAuthority(new Path(filePath));
+    file = new File(pathWithoutSchemeAndAuthority.toString());
+  }
+
+  public LocalCarbonFile(File file) {
+    this.file = file;
+  }
+
+  @Override public String getAbsolutePath() {
+    return file.getAbsolutePath();
+  }
+
+  @Override public CarbonFile[] listFiles(final CarbonFileFilter fileFilter) {
+    if (!file.isDirectory()) {
+      return null;
+    }
+
+    File[] files = file.listFiles(new FileFilter() {
+
+      @Override public boolean accept(File pathname) {
+        return fileFilter.accept(new LocalCarbonFile(pathname));
+      }
+    });
+
+    if (files == null) {
+      return new CarbonFile[0];
+    }
+
+    CarbonFile[] carbonFiles = new CarbonFile[files.length];
+
+    for (int i = 0; i < carbonFiles.length; i++) {
+      carbonFiles[i] = new LocalCarbonFile(files[i]);
+    }
+
+    return carbonFiles;
+  }
+
+  @Override public String getName() {
+    return file.getName();
+  }
+
+  @Override public boolean isDirectory() {
+    return file.isDirectory();
+  }
+
+  @Override public boolean exists() {
+    return file.exists();
+  }
+
+  @Override public String getCanonicalPath() {
+    try {
+      return file.getCanonicalPath();
+    } catch (IOException e) {
+      LOGGER.error(e, "Exception occurred: " + e.getMessage());
+    }
+    return null;
+  }
+
+  @Override public CarbonFile getParentFile() {
+    return new LocalCarbonFile(file.getParentFile());
+  }
+
+  @Override public String getPath() {
+    return file.getPath();
+  }
+
+  @Override public long getSize() {
+    return file.length();
+  }
+
+  public boolean renameTo(String changetoName) {
+    return file.renameTo(new File(changetoName));
+  }
+
+  public boolean delete() {
+    return file.delete();
+  }
+
+  @Override public CarbonFile[] listFiles() {
+
+    if (!file.isDirectory()) {
+      return null;
+    }
+    File[] files = file.listFiles();
+    if (files == null) {
+      return new CarbonFile[0];
+    }
+    CarbonFile[] carbonFiles = new CarbonFile[files.length];
+    for (int i = 0; i < carbonFiles.length; i++) {
+      carbonFiles[i] = new LocalCarbonFile(files[i]);
+    }
+
+    return carbonFiles;
+
+  }
+
+  @Override public boolean createNewFile() {
+    try {
+      return file.createNewFile();
+    } catch (IOException e) {
+      return false;
+    }
+  }
+
+  @Override public long getLastModifiedTime() {
+    return file.lastModified();
+  }
+
+  @Override public boolean setLastModifiedTime(long timestamp) {
+    return file.setLastModified(timestamp);
+  }
+
+  /**
+   * This method will delete the data in the file from a given offset
+   */
+  @Override public boolean truncate(String fileName, long validDataEndOffset) {
+    FileChannel source = null;
+    FileChannel destination = null;
+    boolean fileTruncatedSuccessfully = false;
+    // temporary file name
+    String tempWriteFilePath = fileName + CarbonCommonConstants.TEMPWRITEFILEEXTENSION;
+    FileFactory.FileType fileType = FileFactory.getFileType(fileName);
+    try {
+      CarbonFile tempFile = null;
+      // delete temporary file if it already exists at a given path
+      if (FileFactory.isFileExist(tempWriteFilePath, fileType)) {
+        tempFile = FileFactory.getCarbonFile(tempWriteFilePath, fileType);
+        tempFile.delete();
+      }
+      // create new temporary file
+      FileFactory.createNewFile(tempWriteFilePath, fileType);
+      tempFile = FileFactory.getCarbonFile(tempWriteFilePath, fileType);
+      source = new FileInputStream(fileName).getChannel();
+      destination = new FileOutputStream(tempWriteFilePath).getChannel();
+      long read = destination.transferFrom(source, 0, validDataEndOffset);
+      long totalBytesRead = read;
+      long remaining = validDataEndOffset - totalBytesRead;
+      // read till required data offset is not reached
+      while (remaining > 0) {
+        read = destination.transferFrom(source, totalBytesRead, remaining);
+        totalBytesRead = totalBytesRead + read;
+        remaining = validDataEndOffset - totalBytesRead;
+      }
+      CarbonUtil.closeStreams(source, destination);
+      // rename the temp file to original file
+      tempFile.renameForce(fileName);
+      fileTruncatedSuccessfully = true;
+    } catch (IOException e) {
+      LOGGER.error("Exception occured while truncating the file " + e.getMessage());
+    } finally {
+      CarbonUtil.closeStreams(source, destination);
+    }
+    return fileTruncatedSuccessfully;
+  }
+
+  /**
+   * This method will be used to check whether a file has been modified or not
+   *
+   * @param fileTimeStamp time to be compared with latest timestamp of file
+   * @param endOffset     file length to be compared with current length of file
+   * @return true if the file has been modified, false otherwise
+   */
+  @Override public boolean isFileModified(long fileTimeStamp, long endOffset) {
+    boolean isFileModified = false;
+    if (getLastModifiedTime() > fileTimeStamp || getSize() > endOffset) {
+      isFileModified = true;
+    }
+    return isFileModified;
+  }
+
+  @Override public boolean renameForce(String changetoName) {
+    File destFile = new File(changetoName);
+    // best-effort delete of any existing destination, then rename
+    if (destFile.exists()) {
+      destFile.delete();
+    }
+    return file.renameTo(destFile);
+  }
+
+}

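A local-filesystem sketch for the class above (not part of this commit); the
file name is hypothetical:

    LocalCarbonFile f = new LocalCarbonFile("/tmp/sample.carbondata"); // hypothetical
    if (f.exists() && f.isFileModified(0L, 0L)) {
      // any non-empty existing file is "modified" relative to timestamp 0 / offset 0
      System.out.println("size = " + f.getSize());
    }
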
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/ViewFSCarbonFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/ViewFSCarbonFile.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/ViewFSCarbonFile.java
new file mode 100644
index 0000000..8f11b7a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/filesystem/ViewFSCarbonFile.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.datastorage.store.filesystem;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem;
+
+public class ViewFSCarbonFile extends AbstractDFSCarbonFile {
+  /**
+   * LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(ViewFSCarbonFile.class.getName());
+
+  public ViewFSCarbonFile(String filePath) {
+    super(filePath);
+  }
+
+  public ViewFSCarbonFile(Path path) {
+    super(path);
+  }
+
+  public ViewFSCarbonFile(FileStatus fileStatus) {
+    super(fileStatus);
+  }
+
+  /**
+   * @param listStatus file statuses to wrap
+   * @return the statuses wrapped as ViewFSCarbonFile instances
+   */
+  private CarbonFile[] getFiles(FileStatus[] listStatus) {
+    if (listStatus == null) {
+      return new CarbonFile[0];
+    }
+    CarbonFile[] files = new CarbonFile[listStatus.length];
+    for (int i = 0; i < files.length; i++) {
+      files[i] = new ViewFSCarbonFile(listStatus[i]);
+    }
+    return files;
+  }
+
+  @Override
+  public CarbonFile[] listFiles() {
+    FileStatus[] listStatus = null;
+    try {
+      if (null != fileStatus && fileStatus.isDirectory()) {
+        Path path = fileStatus.getPath();
+        listStatus = path.getFileSystem(FileFactory.getConfiguration()).listStatus(path);
+      } else {
+        return null;
+      }
+    } catch (IOException ex) {
+      LOGGER.error("Exception occured" + ex.getMessage());
+      return new CarbonFile[0];
+    }
+    return getFiles(listStatus);
+  }
+
+  @Override
+  public CarbonFile[] listFiles(final CarbonFileFilter fileFilter) {
+    CarbonFile[] files = listFiles();
+    if (files != null && files.length >= 1) {
+      List<CarbonFile> fileList = new ArrayList<CarbonFile>(files.length);
+      for (int i = 0; i < files.length; i++) {
+        if (fileFilter.accept(files[i])) {
+          fileList.add(files[i]);
+        }
+      }
+      if (fileList.size() >= 1) {
+        return fileList.toArray(new CarbonFile[fileList.size()]);
+      } else {
+        return new CarbonFile[0];
+      }
+    }
+    return files;
+  }
+
+  @Override public CarbonFile getParentFile() {
+    Path parent = fileStatus.getPath().getParent();
+    return null == parent ? null : new ViewFSCarbonFile(parent);
+  }
+
+  @Override
+  public boolean renameForce(String changetoName) {
+    FileSystem fs;
+    try {
+      fs = fileStatus.getPath().getFileSystem(FileFactory.getConfiguration());
+      if (fs instanceof ViewFileSystem) {
+        fs.delete(new Path(changetoName), true);
+        fs.rename(fileStatus.getPath(), new Path(changetoName));
+        return true;
+      } else {
+        return false;
+      }
+    } catch (IOException e) {
+      LOGGER.error("Exception occured" + e.getMessage());
+      return false;
+    }
+  }
+}

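A cautionary sketch (not part of this commit; paths hypothetical):
ViewFileSystem has no overwrite-rename, so renameForce above deletes the
destination first, and the two steps are not atomic:

    CarbonFile tmp = FileFactory.getCarbonFile("viewfs://cluster/carbon/file.tmp", // hypothetical
        FileFactory.FileType.VIEWFS);
    // delete-then-rename: a crash between the two steps can lose the destination
    boolean renamed = tmp.renameForce("viewfs://cluster/carbon/file");
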
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/CompressedDataMeasureDataWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/CompressedDataMeasureDataWrapper.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/CompressedDataMeasureDataWrapper.java
new file mode 100644
index 0000000..c9571b2
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/CompressedDataMeasureDataWrapper.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl;
+
+import org.apache.carbondata.core.datastorage.store.MeasureDataWrapper;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+
+public class CompressedDataMeasureDataWrapper implements MeasureDataWrapper {
+
+  private final CarbonReadDataHolder[] values;
+
+  public CompressedDataMeasureDataWrapper(final CarbonReadDataHolder[] values) {
+    this.values = values;
+  }
+
+  @Override public CarbonReadDataHolder[] getValues() {
+    return values;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/DFSFileHolderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/DFSFileHolderImpl.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/DFSFileHolderImpl.java
new file mode 100644
index 0000000..65c6556
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/DFSFileHolderImpl.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.datastorage.store.impl;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+
+public class DFSFileHolderImpl implements FileHolder {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(DFSFileHolderImpl.class.getName());
+  /**
+   * cache to hold filename and its stream
+   */
+  private Map<String, FSDataInputStream> fileNameAndStreamCache;
+
+  public DFSFileHolderImpl() {
+    this.fileNameAndStreamCache =
+        new HashMap<String, FSDataInputStream>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+  }
+
+  @Override public byte[] readByteArray(String filePath, long offset, int length) {
+    FSDataInputStream fileChannel = updateCache(filePath);
+    return read(fileChannel, length, offset);
+  }
+
+  /**
+   * This method will be used to check whether a stream is already present in
+   * the cache for the given file path; if not, it creates one and adds it to
+   * the cache, otherwise the cached stream is returned
+   *
+   * @param filePath fully qualified file path
+   * @return channel
+   */
+  private FSDataInputStream updateCache(String filePath) {
+    FSDataInputStream fileChannel = fileNameAndStreamCache.get(filePath);
+    try {
+      if (null == fileChannel) {
+        Path pt = new Path(filePath);
+        FileSystem fs = FileSystem.get(FileFactory.getConfiguration());
+        fileChannel = fs.open(pt);
+        fileNameAndStreamCache.put(filePath, fileChannel);
+      }
+    } catch (IOException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return fileChannel;
+  }
+
+  /**
+   * This method will be used to read from the file based on the number of bytes to be read and the position
+   *
+   * @param channel file channel
+   * @param size    number of bytes
+   * @param offset  position
+   * @return byte buffer
+   */
+  private byte[] read(FSDataInputStream channel, int size, long offset) {
+    byte[] byteBuffer = new byte[size];
+    try {
+      channel.seek(offset);
+      channel.readFully(byteBuffer);
+    } catch (Exception e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return byteBuffer;
+  }
+
+  /**
+   * This method will be used to read from the current position of the stream, based on the number of bytes to be read
+   *
+   * @param channel file channel
+   * @param size    number of bytes
+   * @return byte buffer
+   */
+  private byte[] read(FSDataInputStream channel, int size) {
+    byte[] byteBuffer = new byte[size];
+    try {
+      channel.readFully(byteBuffer);
+    } catch (Exception e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return byteBuffer;
+  }
+
+  @Override public int readInt(String filePath, long offset) {
+    FSDataInputStream fileChannel = updateCache(filePath);
+    int i = -1;
+    try {
+      fileChannel.seek(offset);
+      i = fileChannel.readInt();
+    } catch (IOException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+
+    return i;
+  }
+
+  @Override public long readDouble(String filePath, long offset) {
+    FSDataInputStream fileChannel = updateCache(filePath);
+    long i = -1;
+    try {
+      fileChannel.seek(offset);
+      i = fileChannel.readLong();
+    } catch (IOException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+
+    return i;
+  }
+
+  @Override public void finish() {
+    for (Entry<String, FSDataInputStream> entry : fileNameAndStreamCache.entrySet()) {
+      try {
+        FSDataInputStream channel = entry.getValue();
+        if (null != channel) {
+          channel.close();
+        }
+      } catch (IOException exception) {
+        LOGGER.error(exception, exception.getMessage());
+      }
+    }
+
+  }
+
+  @Override public byte[] readByteArray(String filePath, int length) {
+    FSDataInputStream fileChannel = updateCache(filePath);
+    return read(fileChannel, length);
+  }
+
+  @Override public long readLong(String filePath, long offset) {
+    FSDataInputStream fileChannel = updateCache(filePath);
+    long i = -1;
+    try {
+      fileChannel.seek(offset);
+      i = fileChannel.readLong();
+    } catch (IOException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return i;
+  }
+
+  @Override public int readInt(String filePath) {
+    FSDataInputStream fileChannel = updateCache(filePath);
+    int i = -1;
+    try {
+      i = fileChannel.readInt();
+    } catch (IOException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return i;
+  }
+}

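A usage sketch of the caching file holder above (not part of this commit); the
path and offsets are hypothetical:

    FileHolder holder = FileFactory.getFileHolder(FileFactory.FileType.HDFS);
    // the first call opens and caches the stream; later calls on the same path reuse it
    byte[] header = holder.readByteArray("hdfs://nn/carbon/part-0", 0L, 64); // hypothetical
    int marker = holder.readInt("hdfs://nn/carbon/part-0", 64L);
    holder.finish(); // closes every cached stream
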
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/FileFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/FileFactory.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/FileFactory.java
new file mode 100644
index 0000000..d537d6e
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/FileFactory.java
@@ -0,0 +1,477 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFile;
+import org.apache.carbondata.core.datastorage.store.filesystem.HDFSCarbonFile;
+import org.apache.carbondata.core.datastorage.store.filesystem.LocalCarbonFile;
+import org.apache.carbondata.core.datastorage.store.filesystem.ViewFSCarbonFile;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public final class FileFactory {
+  private static Configuration configuration = null;
+
+  private static FileType storeDefaultFileType = FileType.LOCAL;
+
+  static {
+    String property = CarbonUtil.getCarbonStorePath(null, null);
+    if (property != null) {
+      if (property.startsWith(CarbonUtil.HDFS_PREFIX)) {
+        storeDefaultFileType = FileType.HDFS;
+      } else if (property.startsWith(CarbonUtil.VIEWFS_PREFIX)) {
+        storeDefaultFileType = FileType.VIEWFS;
+      }
+    }
+
+    configuration = new Configuration();
+    configuration.addResource(new Path("../core-default.xml"));
+  }
+
+  private FileFactory() {
+
+  }
+
+  public static Configuration getConfiguration() {
+    return configuration;
+  }
+
+  public static FileHolder getFileHolder(FileType fileType) {
+    switch (fileType) {
+      case LOCAL:
+        return new FileHolderImpl();
+      case HDFS:
+      case VIEWFS:
+        return new DFSFileHolderImpl();
+      default:
+        return new FileHolderImpl();
+    }
+  }
+
+  public static FileType getFileType() {
+    String property = CarbonUtil.getCarbonStorePath(null, null);
+    if (property != null) {
+      if (property.startsWith(CarbonUtil.HDFS_PREFIX)) {
+        storeDefaultFileType = FileType.HDFS;
+      } else if (property.startsWith(CarbonUtil.VIEWFS_PREFIX)) {
+        storeDefaultFileType = FileType.VIEWFS;
+      }
+    }
+    return storeDefaultFileType;
+  }
+
+  public static FileType getFileType(String path) {
+    if (path.startsWith(CarbonUtil.HDFS_PREFIX)) {
+      return FileType.HDFS;
+    } else if (path.startsWith(CarbonUtil.VIEWFS_PREFIX)) {
+      return FileType.VIEWFS;
+    }
+    return FileType.LOCAL;
+  }
+
+  public static CarbonFile getCarbonFile(String path, FileType fileType) {
+    switch (fileType) {
+      case LOCAL:
+        return new LocalCarbonFile(path);
+      case HDFS:
+        return new HDFSCarbonFile(path);
+      case VIEWFS:
+        return new ViewFSCarbonFile(path);
+      default:
+        return new LocalCarbonFile(path);
+    }
+  }
+
+  public static DataInputStream getDataInputStream(String path, FileType fileType)
+      throws IOException {
+    path = path.replace("\\", "/");
+    switch (fileType) {
+      case LOCAL:
+        return new DataInputStream(new BufferedInputStream(new FileInputStream(path)));
+      case HDFS:
+      case VIEWFS:
+        Path pt = new Path(path);
+        FileSystem fs = FileSystem.get(configuration);
+        FSDataInputStream stream = fs.open(pt);
+        return new DataInputStream(new BufferedInputStream(stream));
+      default:
+        return new DataInputStream(new BufferedInputStream(new FileInputStream(path)));
+    }
+  }
+
+  public static DataInputStream getDataInputStream(String path, FileType fileType, int bufferSize)
+      throws IOException {
+    path = path.replace("\\", "/");
+    switch (fileType) {
+      case LOCAL:
+        return new DataInputStream(new BufferedInputStream(new FileInputStream(path)));
+      case HDFS:
+      case VIEWFS:
+        Path pt = new Path(path);
+        FileSystem fs = FileSystem.get(configuration);
+        FSDataInputStream stream = fs.open(pt, bufferSize);
+        return new DataInputStream(new BufferedInputStream(stream));
+      default:
+        return new DataInputStream(new BufferedInputStream(new FileInputStream(path)));
+    }
+  }
+
+  /**
+   * return the datainputStream which is seek to the offset of file
+   *
+   * @param path
+   * @param fileType
+   * @param bufferSize
+   * @param offset
+   * @return DataInputStream
+   * @throws IOException
+   */
+  public static DataInputStream getDataInputStream(String path, FileType fileType, int bufferSize,
+      long offset) throws IOException {
+    path = path.replace("\\", "/");
+    switch (fileType) {
+      case HDFS:
+      case VIEWFS:
+        Path pt = new Path(path);
+        FileSystem fs = FileSystem.get(configuration);
+        FSDataInputStream stream = fs.open(pt, bufferSize);
+        stream.seek(offset);
+        return new DataInputStream(new BufferedInputStream(stream));
+      default:
+        FileInputStream fis = new FileInputStream(path);
+        long actualSkipSize = 0;
+        // skip() may skip fewer bytes than requested, so loop on the shortfall
+        while (actualSkipSize != offset) {
+          actualSkipSize += fis.skip(offset - actualSkipSize);
+        }
+        return new DataInputStream(new BufferedInputStream(fis));
+    }
+  }
+
+  public static DataOutputStream getDataOutputStream(String path, FileType fileType)
+      throws IOException {
+    path = path.replace("\\", "/");
+    switch (fileType) {
+      case LOCAL:
+        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
+      case HDFS:
+      case VIEWFS:
+        Path pt = new Path(path);
+        FileSystem fs = pt.getFileSystem(configuration);
+        FSDataOutputStream stream = fs.create(pt, true);
+        return stream;
+      default:
+        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
+    }
+  }
+
+  public static DataOutputStream getDataOutputStream(String path, FileType fileType,
+      short replicationFactor) throws IOException {
+    path = path.replace("\\", "/");
+    switch (fileType) {
+      case LOCAL:
+        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
+      case HDFS:
+      case VIEWFS:
+        Path pt = new Path(path);
+        FileSystem fs = pt.getFileSystem(configuration);
+        FSDataOutputStream stream = fs.create(pt, replicationFactor);
+        return stream;
+      default:
+        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
+    }
+  }
+
+  public static DataOutputStream getDataOutputStream(String path, FileType fileType, int bufferSize)
+      throws IOException {
+    path = path.replace("\\", "/");
+    switch (fileType) {
+      case LOCAL:
+        return new DataOutputStream(
+            new BufferedOutputStream(new FileOutputStream(path), bufferSize));
+      case HDFS:
+      case VIEWFS:
+        Path pt = new Path(path);
+        FileSystem fs = pt.getFileSystem(configuration);
+        FSDataOutputStream stream = fs.create(pt, true, bufferSize);
+        return stream;
+      default:
+        return new DataOutputStream(
+            new BufferedOutputStream(new FileOutputStream(path), bufferSize));
+    }
+  }
+
+  public static DataOutputStream getDataOutputStream(String path, FileType fileType, int bufferSize,
+      boolean append) throws IOException {
+    path = path.replace("\\", "/");
+    switch (fileType) {
+      case LOCAL:
+        return new DataOutputStream(
+            new BufferedOutputStream(new FileOutputStream(path, append), bufferSize));
+      case HDFS:
+      case VIEWFS:
+        Path pt = new Path(path);
+        FileSystem fs = pt.getFileSystem(configuration);
+        FSDataOutputStream stream = null;
+        if (append) {
+          // append only if the file already exists, otherwise HDFS throws a
+          // file-not-found exception
+          if (CarbonUtil.isFileExists(path)) {
+            stream = fs.append(pt, bufferSize);
+          } else {
+            stream = fs.create(pt, true, bufferSize);
+          }
+        } else {
+          stream = fs.create(pt, true, bufferSize);
+        }
+        return stream;
+      default:
+        return new DataOutputStream(
+            new BufferedOutputStream(new FileOutputStream(path), bufferSize));
+    }
+  }
+
+  public static DataOutputStream getDataOutputStream(String path, FileType fileType, int bufferSize,
+      long blockSize) throws IOException {
+    path = path.replace("\\", "/");
+    switch (fileType) {
+      case LOCAL:
+        return new DataOutputStream(
+            new BufferedOutputStream(new FileOutputStream(path), bufferSize));
+      case HDFS:
+      case VIEWFS:
+        Path pt = new Path(path);
+        FileSystem fs = pt.getFileSystem(configuration);
+        FSDataOutputStream stream =
+            fs.create(pt, true, bufferSize, fs.getDefaultReplication(pt), blockSize);
+        return stream;
+      default:
+        return new DataOutputStream(
+            new BufferedOutputStream(new FileOutputStream(path), bufferSize));
+    }
+  }
+
+  /**
+   * This method checks whether the given path exists, and, if
+   * performFileCheck is true, also whether it is a file
+   *
+   * @param filePath         - Path
+   * @param fileType         - FileType Local/HDFS
+   * @param performFileCheck - Provide false for folders, true for files
+   */
+  public static boolean isFileExist(String filePath, FileType fileType, boolean performFileCheck)
+      throws IOException {
+    filePath = filePath.replace("\\", "/");
+    switch (fileType) {
+      case HDFS:
+      case VIEWFS:
+        Path path = new Path(filePath);
+        FileSystem fs = path.getFileSystem(configuration);
+        if (performFileCheck) {
+          return fs.exists(path) && fs.isFile(path);
+        } else {
+          return fs.exists(path);
+        }
+
+      case LOCAL:
+      default:
+        File defaultFile = new File(filePath);
+
+        if (performFileCheck) {
+          return defaultFile.exists() && defaultFile.isFile();
+        } else {
+          return defaultFile.exists();
+        }
+    }
+  }
+
+  /**
+   * This method checks whether the given path exists
+   *
+   * @param filePath - Path
+   * @param fileType - FileType Local/HDFS
+   */
+  public static boolean isFileExist(String filePath, FileType fileType) throws IOException {
+    filePath = filePath.replace("\\", "/");
+    switch (fileType) {
+      case HDFS:
+      case VIEWFS:
+        Path path = new Path(filePath);
+        FileSystem fs = path.getFileSystem(configuration);
+        return fs.exists(path);
+
+      case LOCAL:
+      default:
+        File defaultFile = new File(filePath);
+        return defaultFile.exists();
+    }
+  }
+
+  public static boolean createNewFile(String filePath, FileType fileType) throws IOException {
+    filePath = filePath.replace("\\", "/");
+    switch (fileType) {
+      case HDFS:
+      case VIEWFS:
+        Path path = new Path(filePath);
+        FileSystem fs = path.getFileSystem(configuration);
+        return fs.createNewFile(path);
+
+      case LOCAL:
+      default:
+        File file = new File(filePath);
+        return file.createNewFile();
+    }
+  }
+
+  public static boolean mkdirs(String filePath, FileType fileType) throws IOException {
+    filePath = filePath.replace("\\", "/");
+    switch (fileType) {
+      case HDFS:
+      case VIEWFS:
+        Path path = new Path(filePath);
+        FileSystem fs = path.getFileSystem(configuration);
+        return fs.mkdirs(path);
+      case LOCAL:
+      default:
+        File file = new File(filePath);
+        return file.mkdirs();
+    }
+  }
+
+  /**
+   * For getting a DataOutputStream using the HDFS FileSystem append API.
+   *
+   * @param path
+   * @param fileType
+   * @return the output stream opened for append
+   * @throws IOException
+   */
+  public static DataOutputStream getDataOutputStreamUsingAppend(String path, FileType fileType)
+      throws IOException {
+    path = path.replace("\\", "/");
+    switch (fileType) {
+      case LOCAL:
+        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path, true)));
+      case HDFS:
+      case VIEWFS:
+        Path pt = new Path(path);
+        FileSystem fs = pt.getFileSystem(configuration);
+        FSDataOutputStream stream = fs.append(pt);
+        return stream;
+      default:
+        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
+    }
+  }
+
+  /**
+   * For creating a new lock file; if it is created successfully, it is
+   * registered for deletion on JVM exit so it is cleaned up even after an
+   * abrupt shutdown.
+   *
+   * @param filePath
+   * @param fileType
+   * @return true if the lock file was created
+   * @throws IOException
+   */
+  public static boolean createNewLockFile(String filePath, FileType fileType) throws IOException {
+    filePath = filePath.replace("\\", "/");
+    switch (fileType) {
+      case HDFS:
+      case VIEWFS:
+        Path path = new Path(filePath);
+        FileSystem fs = path.getFileSystem(configuration);
+        if (fs.createNewFile(path)) {
+          fs.deleteOnExit(path);
+          return true;
+        }
+        return false;
+      case LOCAL:
+      default:
+        File file = new File(filePath);
+        return file.createNewFile();
+    }
+  }
+
+  public enum FileType {
+    LOCAL, HDFS, VIEWFS
+  }
+
+  /**
+   * Below method will be used to update the file path:
+   * for the local file type it removes the scheme (file:/) from the path
+   *
+   * @param filePath
+   * @return updated file path without url for local
+   */
+  public static String getUpdatedFilePath(String filePath) {
+    FileType fileType = getFileType(filePath);
+    switch (fileType) {
+      case HDFS:
+      case VIEWFS:
+        return filePath;
+      case LOCAL:
+      default:
+        Path pathWithoutSchemeAndAuthority =
+            Path.getPathWithoutSchemeAndAuthority(new Path(filePath));
+        return pathWithoutSchemeAndAuthority.toString();
+    }
+  }
+
+  /**
+   * It computes the size of a directory
+   *
+   * @param filePath
+   * @return size in bytes
+   * @throws IOException
+   */
+  public static long getDirectorySize(String filePath) throws IOException {
+    FileType fileType = getFileType(filePath);
+    switch (fileType) {
+      case HDFS:
+      case VIEWFS:
+        Path path = new Path(filePath);
+        FileSystem fs = path.getFileSystem(configuration);
+        return fs.getContentSummary(path).getLength();
+      case LOCAL:
+      default:
+        File file = new File(filePath);
+        return FileUtils.sizeOfDirectory(file);
+    }
+  }
+
+}
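
A minimal usage sketch for the helpers above, hedged: it assumes these statics live on
FileFactory in the renamed org.apache.carbondata.core.datastorage.store.impl package (as
the imports elsewhere in this commit suggest) and that /tmp/carbon-demo is a writable
local path.

    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.carbondata.core.datastorage.store.impl.FileFactory;  // assumed package

    public class FileFactoryUsageSketch {
      public static void main(String[] args) throws IOException {
        String dir = "/tmp/carbon-demo";
        FileFactory.FileType type = FileFactory.FileType.LOCAL;

        if (!FileFactory.isFileExist(dir, type)) {
          FileFactory.mkdirs(dir, type);  // create the directory tree
        }
        // lock files created on HDFS/VIEWFS are registered for delete-on-exit
        FileFactory.createNewLockFile(dir + "/demo.lock", type);

        // append-mode stream; for HDFS/VIEWFS this uses FileSystem.append()
        DataOutputStream out =
            FileFactory.getDataOutputStreamUsingAppend(dir + "/data.bin", type);
        try {
          out.writeInt(42);
          out.writeLong(42L);
        } finally {
          out.close();
        }
        System.out.println("dir size: " + FileFactory.getDirectorySize(dir) + " bytes");
      }
    }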

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/FileHolderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/FileHolderImpl.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/FileHolderImpl.java
new file mode 100644
index 0000000..5fefb7b
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/FileHolderImpl.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+public class FileHolderImpl implements FileHolder {
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(FileHolderImpl.class.getName());
+  /**
+   * cache to hold filename and its stream
+   */
+  private Map<String, FileChannel> fileNameAndStreamCache;
+
+  /**
+   * FileHolderImpl Constructor
+   * It will create the cache
+   */
+  public FileHolderImpl() {
+    this.fileNameAndStreamCache =
+        new HashMap<String, FileChannel>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+  }
+
+  public FileHolderImpl(int capacity) {
+    this.fileNameAndStreamCache = new HashMap<String, FileChannel>(capacity);
+  }
+
+  /**
+   * This method will be used to read the byte array from the file, given the offset
+   * and the length (number of bytes) to read
+   *
+   * @param filePath fully qualified file path
+   * @param offset   reading start position,
+   * @param length   number of bytes to be read
+   * @return read byte array
+   */
+  @Override public byte[] readByteArray(String filePath, long offset, int length) {
+    FileChannel fileChannel = updateCache(filePath);
+    ByteBuffer byteBuffer = read(fileChannel, length, offset);
+    return byteBuffer.array();
+  }
+
+  /**
+   * This method will be used to close all the streams currently present in the cache
+   */
+  @Override public void finish() {
+
+    for (Entry<String, FileChannel> entry : fileNameAndStreamCache.entrySet()) {
+      try {
+        FileChannel channel = entry.getValue();
+        if (null != channel) {
+          channel.close();
+        }
+      } catch (IOException exception) {
+        LOGGER.error(exception, exception.getMessage());
+      }
+    }
+
+  }
+
+  /**
+   * This method will be used to read an int from the file at the given position (offset);
+   * the length is always 4 because the int byte size is 4
+   *
+   * @param filePath fully qualified file path
+   * @param offset   reading start position,
+   * @return read int
+   */
+  @Override public int readInt(String filePath, long offset) {
+    FileChannel fileChannel = updateCache(filePath);
+    ByteBuffer byteBuffer = read(fileChannel, CarbonCommonConstants.INT_SIZE_IN_BYTE, offset);
+    return byteBuffer.getInt();
+  }
+
+  /**
+   * This method will be used to read an int from the file's current position;
+   * the length is always 4 because the int byte size is 4
+   *
+   * @param filePath fully qualified file path
+   * @return read int
+   */
+  @Override public int readInt(String filePath) {
+    FileChannel fileChannel = updateCache(filePath);
+    ByteBuffer byteBuffer = read(fileChannel, CarbonCommonConstants.INT_SIZE_IN_BYTE);
+    return byteBuffer.getInt();
+  }
+
+  /**
+   * This method will be used to read a long from the file at the given position (offset);
+   * the length is always 8 because the long byte size is 8
+   *
+   * @param filePath fully qualified file path
+   * @param offset   reading start position,
+   * @return read long
+   */
+  @Override public long readDouble(String filePath, long offset) {
+    FileChannel fileChannel = updateCache(filePath);
+    ByteBuffer byteBuffer = read(fileChannel, CarbonCommonConstants.LONG_SIZE_IN_BYTE, offset);
+    return byteBuffer.getLong();
+  }
+
+  /**
+   * This method will be used to check whether a stream is already present in the
+   * cache for the given file path; if not present then create it and add it to the
+   * cache, otherwise get it from the cache
+   *
+   * @param filePath fully qualified file path
+   * @return channel
+   */
+  private FileChannel updateCache(String filePath) {
+    FileChannel fileChannel = fileNameAndStreamCache.get(filePath);
+    try {
+      if (null == fileChannel) {
+        FileInputStream stream = new FileInputStream(filePath);
+        fileChannel = stream.getChannel();
+        fileNameAndStreamCache.put(filePath, fileChannel);
+      }
+    } catch (IOException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return fileChannel;
+  }
+
+  /**
+   * This method will be used to read from the file channel, given the number of bytes and the position
+   *
+   * @param channel file channel
+   * @param size    number of bytes
+   * @param offset  position
+   * @return byte buffer
+   */
+  private ByteBuffer read(FileChannel channel, int size, long offset) {
+    ByteBuffer byteBuffer = ByteBuffer.allocate(size);
+    try {
+      channel.position(offset);
+      channel.read(byteBuffer);
+    } catch (Exception e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    byteBuffer.rewind();
+    return byteBuffer;
+  }
+
+  /**
+   * This method will be used to read from the file channel's current position, given the number of bytes
+   *
+   * @param channel file channel
+   * @param size    number of bytes
+   * @return byte buffer
+   */
+  private ByteBuffer read(FileChannel channel, int size) {
+    ByteBuffer byteBuffer = ByteBuffer.allocate(size);
+    try {
+      channel.read(byteBuffer);
+    } catch (Exception e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    byteBuffer.rewind();
+    return byteBuffer;
+  }
+
+
+  /**
+   * This method will be used to read the byte array from the file, given the length (number of bytes)
+   *
+   * @param filePath fully qualified file path
+   * @param length   number of bytes to be read
+   * @return read byte array
+   */
+  @Override public byte[] readByteArray(String filePath, int length) {
+    FileChannel fileChannel = updateCache(filePath);
+    ByteBuffer byteBuffer = read(fileChannel, length);
+    return byteBuffer.array();
+  }
+
+  /**
+   * This method will be used to read a long from the file at the given position (offset);
+   * the length is always 8 because the long byte size is 8
+   *
+   * @param filePath fully qualified file path
+   * @param offset   reading start position,
+   * @return read long
+   */
+  @Override public long readLong(String filePath, long offset) {
+    FileChannel fileChannel = updateCache(filePath);
+    ByteBuffer byteBuffer = read(fileChannel, CarbonCommonConstants.LONG_SIZE_IN_BYTE, offset);
+    return byteBuffer.getLong();
+  }
+
+}
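
A short, hedged sketch of reading values back through FileHolderImpl; it assumes the
/tmp/carbon-demo/data.bin file written in the earlier sketch (an int followed by a long):

    import org.apache.carbondata.core.datastorage.store.FileHolder;
    import org.apache.carbondata.core.datastorage.store.impl.FileHolderImpl;

    public class FileHolderUsageSketch {
      public static void main(String[] args) {
        FileHolder holder = new FileHolderImpl();
        try {
          int header = holder.readInt("/tmp/carbon-demo/data.bin", 0);   // 4 bytes at offset 0
          long value = holder.readLong("/tmp/carbon-demo/data.bin", 4);  // 8 bytes at offset 4
          byte[] raw = holder.readByteArray("/tmp/carbon-demo/data.bin", 0, 12);
          System.out.println(header + " / " + value + " / " + raw.length + " bytes");
        } finally {
          holder.finish();  // closes every cached channel
        }
      }
    }

Note that read errors are logged rather than thrown (the buffer is returned as-is), so
finish() should always be called to release the cached FileChannels.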

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/MemoryMappedFileHolderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/MemoryMappedFileHolderImpl.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/MemoryMappedFileHolderImpl.java
new file mode 100644
index 0000000..98d0039
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/MemoryMappedFileHolderImpl.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.datastorage.store.impl;
+
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+public class MemoryMappedFileHolderImpl implements FileHolder {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(MemoryMappedFileHolderImpl.class.getName());
+
+  private Map<String, FileChannel> fileNameAndStreamCache;
+  private Map<String, MappedByteBuffer> fileNameAndMemoryMappedFileCache;
+
+  public MemoryMappedFileHolderImpl() {
+    this(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+  }
+
+  public MemoryMappedFileHolderImpl(int capacity) {
+    this.fileNameAndStreamCache = new HashMap<String, FileChannel>(capacity);
+    this.fileNameAndMemoryMappedFileCache = new HashMap<String, MappedByteBuffer>(capacity);
+  }
+
+  private MappedByteBuffer updateCache(String filePath) {
+    MappedByteBuffer byteBuffer = fileNameAndMemoryMappedFileCache.get(filePath);
+    try {
+      if (null == byteBuffer) {
+        FileChannel fileChannel = new RandomAccessFile(filePath, "r").getChannel();
+        byteBuffer = fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, fileChannel.size());
+        fileNameAndStreamCache.put(filePath, fileChannel);
+        fileNameAndMemoryMappedFileCache.put(filePath, byteBuffer);
+      }
+    } catch (IOException e) {
+      LOGGER.error(e, e.getMessage());
+    }
+    return byteBuffer;
+  }
+
+  @Override
+  public byte[] readByteArray(String filePath, long offset, int length) {
+    byte[] dst = new byte[length];
+    // position the mapped buffer at the requested file offset, then copy 'length'
+    // bytes from there into the destination array starting at index 0
+    MappedByteBuffer buffer = updateCache(filePath);
+    buffer.position((int) offset);
+    buffer.get(dst, 0, length);
+    return dst;
+  }
+
+  @Override
+  public byte[] readByteArray(String filePath, int length) {
+    byte[] dst = new byte[length];
+    updateCache(filePath).get(dst);
+    return dst;
+  }
+
+  @Override
+  public int readInt(String filePath, long offset) {
+    byte[] dst = readByteArray(filePath, offset, CarbonCommonConstants.INT_SIZE_IN_BYTE);
+    return ByteBuffer.wrap(dst).getInt();
+  }
+
+  @Override
+  public long readLong(String filePath, long offset) {
+    byte[] dst = readByteArray(filePath, offset, CarbonCommonConstants.LONG_SIZE_IN_BYTE);
+    return ByteBuffer.wrap(dst).getLong();
+  }
+
+  @Override
+  public int readInt(String filePath) {
+    return updateCache(filePath).getInt();
+  }
+
+  @Override
+  public long readDouble(String filePath, long offset) {
+    byte[] dst = readByteArray(filePath, offset, CarbonCommonConstants.LONG_SIZE_IN_BYTE);
+    return ByteBuffer.wrap(dst).getLong();
+  }
+
+  @Override
+  public void finish() {
+    fileNameAndMemoryMappedFileCache.clear();
+    for (Entry<String, FileChannel> entry : fileNameAndStreamCache.entrySet()) {
+      try {
+        FileChannel channel = entry.getValue();
+        if (null != channel) {
+          channel.close();
+        }
+      } catch (IOException exception) {
+        LOGGER.error(exception, exception.getMessage());
+      }
+    }
+  }
+}
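
Because MemoryMappedFileHolderImpl implements the same FileHolder interface, it is a
drop-in alternative to FileHolderImpl; only the construction differs. A hedged sketch
under the same /tmp assumptions as above:

    import org.apache.carbondata.core.datastorage.store.FileHolder;
    import org.apache.carbondata.core.datastorage.store.impl.MemoryMappedFileHolderImpl;

    public class MappedHolderSketch {
      public static void main(String[] args) {
        // same FileHolder contract, memory-mapped backing
        FileHolder holder = new MemoryMappedFileHolderImpl();
        int header = holder.readInt("/tmp/carbon-demo/data.bin", 0);  // served from the mapped buffer
        System.out.println(header);
        holder.finish();  // clears the mapping cache and closes the channels
      }
    }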

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/compressed/AbstractHeavyCompressedDoubleArrayDataStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/compressed/AbstractHeavyCompressedDoubleArrayDataStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/compressed/AbstractHeavyCompressedDoubleArrayDataStore.java
new file mode 100644
index 0000000..21c2a60
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/compressed/AbstractHeavyCompressedDoubleArrayDataStore.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.data.compressed;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.NodeMeasureDataStore;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonWriteDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public abstract class AbstractHeavyCompressedDoubleArrayDataStore
+    implements NodeMeasureDataStore {
+
+  /**
+   * values.
+   */
+  protected ValueCompressonHolder.UnCompressValue[] values;
+
+  /**
+   * compressionModel.
+   */
+  protected ValueCompressionModel compressionModel;
+
+  /**
+   * type
+   */
+  private char[] type;
+
+  /**
+   * AbstractHeavyCompressedDoubleArrayDataStore constructor.
+   *
+   * @param compressionModel
+   */
+  public AbstractHeavyCompressedDoubleArrayDataStore(ValueCompressionModel compressionModel) {
+    this.compressionModel = compressionModel;
+    if (null != compressionModel) {
+      this.type = compressionModel.getType();
+      values =
+          new ValueCompressonHolder.UnCompressValue[compressionModel.getUnCompressValues().length];
+    }
+  }
+
+  @Override public byte[][] getWritableMeasureDataArray(CarbonWriteDataHolder[] dataHolder) {
+    for (int i = 0; i < compressionModel.getUnCompressValues().length; i++) {
+      values[i] = compressionModel.getUnCompressValues()[i].getNew();
+      if (type[i] != CarbonCommonConstants.BYTE_VALUE_MEASURE
+          && type[i] != CarbonCommonConstants.BIG_DECIMAL_MEASURE) {
+        if (type[i] == CarbonCommonConstants.BIG_INT_MEASURE) {
+          values[i].setValue(ValueCompressionUtil
+              .getCompressedValues(compressionModel.getCompType()[i],
+                  dataHolder[i].getWritableLongValues(), compressionModel.getChangedDataType()[i],
+                  (long) compressionModel.getMaxValue()[i], compressionModel.getDecimal()[i]));
+        } else {
+          values[i].setValue(ValueCompressionUtil
+              .getCompressedValues(compressionModel.getCompType()[i],
+                  dataHolder[i].getWritableDoubleValues(), compressionModel.getChangedDataType()[i],
+                  (double) compressionModel.getMaxValue()[i], compressionModel.getDecimal()[i]));
+        }
+      } else {
+        values[i].setValue(dataHolder[i].getWritableByteArrayValues());
+      }
+      values[i] = values[i].compress();
+    }
+    byte[][] returnValue = new byte[values.length][];
+    for (int i = 0; i < values.length; i++) {
+      returnValue[i] = values[i].getBackArrayData();
+    }
+    return returnValue;
+  }
+
+  @Override public short getLength() {
+    return values != null ? (short) values.length : 0;
+  }
+
+}
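
getWritableMeasureDataArray above follows a compress-then-collect pattern: each measure
column is compressed independently according to its type tag, then the backing byte
arrays are gathered into one byte[][]. A generic, hedged illustration of that shape in
plain Java, with java.util.zip.Deflater standing in for the carbondata compressor (which
this commit does not show):

    import java.util.zip.Deflater;

    public class CompressThenCollectSketch {
      // compress each column independently, then gather the resulting byte arrays
      static byte[][] compressColumns(byte[][] columns) {
        byte[][] out = new byte[columns.length][];
        for (int i = 0; i < columns.length; i++) {
          Deflater deflater = new Deflater();
          deflater.setInput(columns[i]);
          deflater.finish();
          byte[] buf = new byte[columns[i].length + 64];  // head-room for tiny inputs
          int n = deflater.deflate(buf);
          deflater.end();
          byte[] exact = new byte[n];
          System.arraycopy(buf, 0, exact, 0, n);
          out[i] = exact;  // one compressed block per measure column
        }
        return out;
      }
    }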

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataFileStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataFileStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataFileStore.java
new file mode 100644
index 0000000..2fd873b
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataFileStore.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.data.compressed;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.MeasureDataWrapper;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.datastorage.store.impl.CompressedDataMeasureDataWrapper;
+
+public class HeavyCompressedDoubleArrayDataFileStore
+    extends AbstractHeavyCompressedDoubleArrayDataStore {
+  /**
+   * measuresOffsetsArray.
+   */
+  private long[] measuresOffsetsArray;
+
+  /**
+   * measuresLengthArray.
+   */
+  private int[] measuresLengthArray;
+
+  /**
+   * fileName.
+   */
+  private String fileName;
+
+  /**
+   * HeavyCompressedDoubleArrayDataFileStore.
+   *
+   * @param compressionModel
+   * @param measuresOffsetsArray
+   * @param measuresLengthArray
+   * @param fileName
+   */
+  public HeavyCompressedDoubleArrayDataFileStore(ValueCompressionModel compressionModel,
+      long[] measuresOffsetsArray, int[] measuresLengthArray, String fileName) {
+    super(compressionModel);
+    if (null != compressionModel) {
+      this.fileName = fileName;
+      this.measuresLengthArray = measuresLengthArray;
+      this.measuresOffsetsArray = measuresOffsetsArray;
+      for (int i = 0; i < values.length; i++) {
+        values[i] = compressionModel.getUnCompressValues()[i].getNew().getCompressorObject();
+      }
+    }
+  }
+
+  @Override public MeasureDataWrapper getBackData(int[] cols, FileHolder fileHolder) {
+    if (null == compressionModel) {
+      return null;
+    }
+    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[values.length];
+
+    if (cols != null) {
+      for (int i = 0; i < cols.length; i++) {
+        ValueCompressonHolder.UnCompressValue copy = values[cols[i]].getNew();
+        copy.setValue(fileHolder
+            .readByteArray(fileName, measuresOffsetsArray[cols[i]], measuresLengthArray[cols[i]]));
+        vals[cols[i]] = copy.uncompress(compressionModel.getChangedDataType()[cols[i]])
+            .getValues(compressionModel.getDecimal()[cols[i]],
+                compressionModel.getMaxValue()[cols[i]]);
+        copy = null;
+      }
+    } else {
+      for (int j = 0; j < vals.length; j++) {
+        ValueCompressonHolder.UnCompressValue copy = values[j].getNew();
+        copy.setValue(
+            fileHolder.readByteArray(fileName, measuresOffsetsArray[j], measuresLengthArray[j]));
+        vals[j] = copy.uncompress(compressionModel.getChangedDataType()[j])
+            .getValues(compressionModel.getDecimal()[j], compressionModel.getMaxValue()[j]);
+        copy = null;
+      }
+    }
+    return new CompressedDataMeasureDataWrapper(vals);
+
+  }
+
+  @Override public MeasureDataWrapper getBackData(int cols, FileHolder fileHolder) {
+    if (null == compressionModel) {
+      return null;
+    }
+    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[values.length];
+    ValueCompressonHolder.UnCompressValue copy = values[cols].getNew();
+    copy.setValue(
+        fileHolder.readByteArray(fileName, measuresOffsetsArray[cols], measuresLengthArray[cols]));
+    vals[cols] = copy.uncompress(compressionModel.getChangedDataType()[cols])
+        .getValues(compressionModel.getDecimal()[cols], compressionModel.getMaxValue()[cols]);
+    return new CompressedDataMeasureDataWrapper(vals);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataInMemoryStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataInMemoryStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataInMemoryStore.java
new file mode 100644
index 0000000..f726ba7
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/compressed/HeavyCompressedDoubleArrayDataInMemoryStore.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.data.compressed;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.MeasureDataWrapper;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.datastorage.store.impl.CompressedDataMeasureDataWrapper;
+
+public class HeavyCompressedDoubleArrayDataInMemoryStore
+    extends AbstractHeavyCompressedDoubleArrayDataStore {
+
+  public HeavyCompressedDoubleArrayDataInMemoryStore(ValueCompressionModel compressionModel,
+      long[] measuresOffsetsArray, int[] measuresLengthArray, String fileName,
+      FileHolder fileHolder) {
+    super(compressionModel);
+    for (int i = 0; i < measuresLengthArray.length; i++) {
+      values[i] = compressionModel.getUnCompressValues()[i].getCompressorObject();
+      values[i].setValue(
+          fileHolder.readByteArray(fileName, measuresOffsetsArray[i], measuresLengthArray[i]));
+    }
+  }
+
+  public HeavyCompressedDoubleArrayDataInMemoryStore(ValueCompressionModel compressionModel) {
+    super(compressionModel);
+  }
+
+  @Override public MeasureDataWrapper getBackData(int[] cols, FileHolder fileHolder) {
+    if (null == compressionModel) {
+      return null;
+    }
+    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[values.length];
+    if (cols != null) {
+      for (int i = 0; i < cols.length; i++) {
+        vals[cols[i]] = values[cols[i]].uncompress(compressionModel.getChangedDataType()[cols[i]])
+            .getValues(compressionModel.getDecimal()[cols[i]],
+                compressionModel.getMaxValue()[cols[i]]);
+      }
+    } else {
+      for (int i = 0; i < vals.length; i++) {
+
+        vals[i] = values[i].uncompress(compressionModel.getChangedDataType()[i])
+            .getValues(compressionModel.getDecimal()[i], compressionModel.getMaxValue()[i]);
+      }
+    }
+    return new CompressedDataMeasureDataWrapper(vals);
+  }
+
+  @Override public MeasureDataWrapper getBackData(int cols, FileHolder fileHolder) {
+    if (null == compressionModel) {
+      return null;
+    }
+    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[values.length];
+    vals[cols] = values[cols].uncompress(compressionModel.getChangedDataType()[cols])
+        .getValues(compressionModel.getDecimal()[cols], compressionModel.getMaxValue()[cols]);
+    return new CompressedDataMeasureDataWrapper(vals);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/uncompressed/AbstractDoubleArrayDataStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/uncompressed/AbstractDoubleArrayDataStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/uncompressed/AbstractDoubleArrayDataStore.java
new file mode 100644
index 0000000..8271e43
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/uncompressed/AbstractDoubleArrayDataStore.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.data.uncompressed;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.NodeMeasureDataStore;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonWriteDataHolder;
+import org.apache.carbondata.core.util.ValueCompressionUtil;
+
+public abstract class AbstractDoubleArrayDataStore implements NodeMeasureDataStore {
+
+  protected ValueCompressonHolder.UnCompressValue[] values;
+
+  protected ValueCompressionModel compressionModel;
+
+  private char[] type;
+
+  public AbstractDoubleArrayDataStore(ValueCompressionModel compressionModel) {
+    this.compressionModel = compressionModel;
+    if (null != compressionModel) {
+      values =
+          new ValueCompressonHolder.UnCompressValue[compressionModel.getUnCompressValues().length];
+      type = compressionModel.getType();
+    }
+  }
+
+  @Override public byte[][] getWritableMeasureDataArray(CarbonWriteDataHolder[] dataHolder) {
+    values =
+        new ValueCompressonHolder.UnCompressValue[compressionModel.getUnCompressValues().length];
+    for (int i = 0; i < compressionModel.getUnCompressValues().length; i++) {
+      values[i] = compressionModel.getUnCompressValues()[i].getNew();
+      if (type[i] != CarbonCommonConstants.BYTE_VALUE_MEASURE
+          && type[i] != CarbonCommonConstants.BIG_DECIMAL_MEASURE) {
+        if (type[i] == CarbonCommonConstants.BIG_INT_MEASURE) {
+          values[i].setValue(ValueCompressionUtil
+              .getCompressedValues(compressionModel.getCompType()[i],
+                  dataHolder[i].getWritableLongValues(), compressionModel.getChangedDataType()[i],
+                  (long) compressionModel.getMaxValue()[i], compressionModel.getDecimal()[i]));
+        } else {
+          values[i].setValue(ValueCompressionUtil
+              .getCompressedValues(compressionModel.getCompType()[i],
+                  dataHolder[i].getWritableDoubleValues(), compressionModel.getChangedDataType()[i],
+                  (double) compressionModel.getMaxValue()[i], compressionModel.getDecimal()[i]));
+        }
+      } else {
+        values[i].setValue(dataHolder[i].getWritableByteArrayValues());
+      }
+    }
+
+    byte[][] returnValue = new byte[values.length][];
+
+    for (int i = 0; i < values.length; i++) {
+      returnValue[i] = values[i].getBackArrayData();
+    }
+    return returnValue;
+  }
+
+  @Override public short getLength() {
+    return values != null ? (short) values.length : 0;
+  }
+
+}
+

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataFileStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataFileStore.java b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataFileStore.java
new file mode 100644
index 0000000..182b868
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastorage/store/impl/data/uncompressed/DoubleArrayDataFileStore.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.datastorage.store.impl.data.uncompressed;
+
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.MeasureDataWrapper;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+import org.apache.carbondata.core.datastorage.store.impl.CompressedDataMeasureDataWrapper;
+
+public class DoubleArrayDataFileStore extends AbstractDoubleArrayDataStore {
+
+  private long[] measuresOffsetsArray;
+
+  private int[] measuresLengthArray;
+
+  private String fileName;
+
+  public DoubleArrayDataFileStore(ValueCompressionModel compressionModel,
+      long[] measuresOffsetsArray, String fileName, int[] measuresLengthArray) {
+    super(compressionModel);
+    this.fileName = fileName;
+    this.measuresLengthArray = measuresLengthArray;
+    this.measuresOffsetsArray = measuresOffsetsArray;
+  }
+
+  @Override public MeasureDataWrapper getBackData(int[] cols, FileHolder fileHolder) {
+    if (null == compressionModel) {
+      return null;
+    }
+    UnCompressValue[] unComp = new UnCompressValue[measuresLengthArray.length];
+    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[measuresLengthArray.length];
+    if (cols != null) {
+      for (int i = 0; i < cols.length; i++) {
+        unComp[cols[i]] = compressionModel.getUnCompressValues()[cols[i]].getNew();
+        unComp[cols[i]].setValueInBytes(fileHolder
+            .readByteArray(fileName, measuresOffsetsArray[cols[i]], measuresLengthArray[cols[i]]));
+        vals[cols[i]] = unComp[cols[i]].getValues(compressionModel.getDecimal()[cols[i]],
+            compressionModel.getMaxValue()[cols[i]]);
+      }
+    } else {
+      for (int i = 0; i < unComp.length; i++) {
+
+        unComp[i] = compressionModel.getUnCompressValues()[i].getNew();
+        unComp[i].setValueInBytes(
+            fileHolder.readByteArray(fileName, measuresOffsetsArray[i], measuresLengthArray[i]));
+        vals[i] = unComp[i]
+            .getValues(compressionModel.getDecimal()[i], compressionModel.getMaxValue()[i]);
+      }
+    }
+    return new CompressedDataMeasureDataWrapper(vals);
+  }
+
+  @Override public MeasureDataWrapper getBackData(int cols, FileHolder fileHolder) {
+    if (null == compressionModel) {
+      return null;
+    }
+    UnCompressValue[] unComp = new UnCompressValue[measuresLengthArray.length];
+    CarbonReadDataHolder[] vals = new CarbonReadDataHolder[measuresLengthArray.length];
+
+    unComp[cols] = compressionModel.getUnCompressValues()[cols].getNew();
+    unComp[cols].setValueInBytes(
+        fileHolder.readByteArray(fileName, measuresOffsetsArray[cols], measuresLengthArray[cols]));
+    vals[cols] = unComp[cols]
+        .getValues(compressionModel.getDecimal()[cols], compressionModel.getMaxValue()[cols]);
+    return new CompressedDataMeasureDataWrapper(vals);
+  }
+}



[38/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthVariableSplitGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthVariableSplitGenerator.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthVariableSplitGenerator.java
new file mode 100644
index 0000000..34515da
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/columnar/impl/MultiDimKeyVarLengthVariableSplitGenerator.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.keygenerator.columnar.impl;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+import org.apache.carbondata.core.keygenerator.columnar.ColumnarSplitter;
+import org.apache.carbondata.core.keygenerator.mdkey.MultiDimKeyVarLengthGenerator;
+
+public class MultiDimKeyVarLengthVariableSplitGenerator extends MultiDimKeyVarLengthGenerator
+    implements ColumnarSplitter {
+
+  /**
+   *
+   */
+  private static final long serialVersionUID = 1L;
+
+  private int[] dimensionsToSplit;
+
+  private int[] blockKeySize;
+
+  public MultiDimKeyVarLengthVariableSplitGenerator(int[] lens, int[] dimSplit) {
+    super(lens);
+    this.dimensionsToSplit = dimSplit;
+    initialise();
+
+  }
+
+  private void initialise() {
+    int s = 0;
+    List<Set<Integer>> splitList =
+        new ArrayList<Set<Integer>>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+    Set<Integer> split = new TreeSet<Integer>();
+    splitList.add(split);
+    int dimSplitIndx = 0;
+
+    for (int i = 0; i < byteRangesForKeys.length; i++) {
+      if (s == dimensionsToSplit[dimSplitIndx]) {
+        s = 0;
+        split = new TreeSet<Integer>();
+        splitList.add(split);
+        dimSplitIndx++;
+      }
+      // add every byte position covered by this key's range to the current split
+      for (int j2 = byteRangesForKeys[i][0]; j2 <= byteRangesForKeys[i][1]; j2++) {
+        split.add(j2);
+      }
+      s++;
+
+    }
+    List<Integer>[] splits = new List[splitList.size()];
+    int i = 0;
+    for (Set<Integer> splitLocal : splitList) {
+      List<Integer> range = new ArrayList<Integer>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+      for (Integer index : splitLocal) {
+        range.add(index);
+      }
+      splits[i++] = range;
+    }
+    for (int j = 1; j < splits.length; j++) {
+      if (splits[j - 1].get(splits[j - 1].size() - 1) == splits[j].get(0)) {
+        splits[j].remove(0);
+      }
+    }
+    int[][] splitDimArray = new int[splits.length][];
+    for (int j = 0; j < splits.length; j++) {
+      int[] a = convertToArray(splits[j]);
+      splitDimArray[j] = a.length > 0 ? new int[] { a[0], a[a.length - 1] } : a;
+    }
+
+    int[][] dimBlockArray = new int[byteRangesForKeys.length][];
+    Set<Integer>[] dimBlockSet = new Set[dimBlockArray.length];
+    for (int k = 0; k < byteRangesForKeys.length; k++) {
+      int[] dimRange = byteRangesForKeys[k];
+      Set<Integer> dimBlockPosSet = new TreeSet<Integer>();
+      dimBlockSet[k] = dimBlockPosSet;
+      for (int j = 0; j < splitDimArray.length; j++) {
+        if (dimRange[0] >= splitDimArray[j][0] && dimRange[0] <= splitDimArray[j][1]) {
+          dimBlockPosSet.add(j);
+        }
+        if (dimRange[1] >= splitDimArray[j][0] && dimRange[1] <= splitDimArray[j][1]) {
+          dimBlockPosSet.add(j);
+        }
+      }
+
+    }
+
+    for (int j = 0; j < dimBlockSet.length; j++) {
+      dimBlockArray[j] = convertToArray(dimBlockSet[j]);
+    }
+
+    int[][] splitDimArrayLocalIndexes = new int[splitDimArray.length][];
+    for (int j = 0; j < splitDimArrayLocalIndexes.length; j++) {
+      splitDimArrayLocalIndexes[j] = splitDimArray[j].length > 0 ?
+          new int[] { 0, splitDimArray[j][1] - splitDimArray[j][0] } :
+          new int[0];
+    }
+
+    int[][][] byteRangesForDims = new int[byteRangesForKeys.length][][];
+    for (int j = 0; j < byteRangesForKeys.length; j++) {
+      if (dimBlockArray[j].length > 1) {
+        int[] bArray1 = splitDimArrayLocalIndexes[dimBlockArray[j][0]];
+        byteRangesForDims[j] = new int[2][2];
+        byteRangesForDims[j][0] =
+            new int[] { bArray1[bArray1.length - 1], bArray1[bArray1.length - 1] };
+        byteRangesForDims[j][1] = new int[] { 0,
+            (byteRangesForKeys[j][byteRangesForKeys[j].length - 1] - byteRangesForKeys[j][0]) - 1 };
+      } else {
+        byteRangesForDims[j] = new int[1][1];
+        int[] bArray1 = splitDimArray[dimBlockArray[j][0]];
+        byteRangesForDims[j][0] = new int[] { byteRangesForKeys[j][0] - bArray1[0],
+            byteRangesForKeys[j][1] - bArray1[0] };
+      }
+    }
+    blockKeySize = new int[splitDimArray.length];
+
+    for (int j = 0; j < blockKeySize.length; j++) {
+      blockKeySize[j] =
+          splitDimArray[j].length > 0 ? splitDimArray[j][1] - splitDimArray[j][0] + 1 : 0;
+    }
+
+  }
+
+  private int[] convertToArray(List<Integer> list) {
+    int[] ints = new int[list.size()];
+    for (int i = 0; i < ints.length; i++) {
+      ints[i] = list.get(i);
+    }
+    return ints;
+  }
+
+  private int[] convertToArray(Set<Integer> set) {
+    int[] ints = new int[set.size()];
+    int i = 0;
+    for (Iterator iterator = set.iterator(); iterator.hasNext(); ) {
+      ints[i++] = (Integer) iterator.next();
+    }
+    return ints;
+  }
+
+  @Override public byte[][] splitKey(byte[] key) {
+    byte[][] split = new byte[blockKeySize.length][];
+    int copyIndex = 0;
+    for (int i = 0; i < split.length; i++) {
+      split[i] = new byte[blockKeySize[i]];
+      System.arraycopy(key, copyIndex, split[i], 0, split[i].length);
+      copyIndex += blockKeySize[i];
+    }
+    return split;
+  }
+
+  @Override public byte[][] generateAndSplitKey(long[] keys) throws KeyGenException {
+    return splitKey(generateKey(keys));
+  }
+
+  @Override public byte[][] generateAndSplitKey(int[] keys) throws KeyGenException {
+    return splitKey(generateKey(keys));
+  }
+
+  @Override public long[] getKeyArray(byte[][] key) {
+    byte[] fullKey = new byte[getKeySizeInBytes()];
+    int copyIndex = 0;
+    for (int i = 0; i < key.length; i++) {
+      System.arraycopy(key[i], 0, fullKey, copyIndex, key[i].length);
+      copyIndex += key[i].length;
+    }
+    return getKeyArray(fullKey);
+  }
+
+  @Override public byte[] getKeyByteArray(byte[][] key) {
+    byte[] fullKey = new byte[getKeySizeInBytes()];
+    int copyIndex = 0;
+    for (int i = 0; i < key.length; i++) {
+      System.arraycopy(key[i], 0, fullKey, copyIndex, key[i].length);
+      copyIndex += key[i].length;
+    }
+    return fullKey;
+  }
+
+  @Override public byte[] getKeyByteArray(byte[][] key, int[] columnIndexes) {
+    return null;
+  }
+
+  @Override public long[] getKeyArray(byte[][] key, int[] columnIndexes) {
+    return null;
+  }
+
+  public int[] getBlockKeySize() {
+    return blockKeySize;
+  }
+
+  @Override public int getKeySizeByBlock(int[] blockIndexes) {
+    Set<Integer> selectedRanges = new HashSet<>();
+    for (int i = 0; i < blockIndexes.length; i++) {
+      int[] byteRange = byteRangesForKeys[blockIndexes[i]];
+      for (int j = byteRange[0]; j <= byteRange[1]; j++) {
+        selectedRanges.add(j);
+      }
+    }
+    return selectedRanges.size();
+  }
+
+  @Override public boolean equals(Object obj) {
+    if(!(obj instanceof MultiDimKeyVarLengthVariableSplitGenerator)) {
+      return false;
+    }
+    MultiDimKeyVarLengthVariableSplitGenerator o = (MultiDimKeyVarLengthVariableSplitGenerator)obj;
+    return Arrays.equals(o.dimensionsToSplit, dimensionsToSplit) && super.equals(obj);
+  }
+
+  @Override public int hashCode() {
+    return super.hashCode() + Arrays.hashCode(dimensionsToSplit);
+  }
+}
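
A hedged usage sketch for the splitter above. The assumptions (not confirmed by this
diff) are that lens gives each dimension's key length in bits, as in the parent
MultiDimKeyVarLengthGenerator, and that dimSplit gives how many dimensions fall into
each split block:

    import org.apache.carbondata.core.keygenerator.KeyGenException;
    import org.apache.carbondata.core.keygenerator.columnar.impl.MultiDimKeyVarLengthVariableSplitGenerator;

    public class SplitGeneratorSketch {
      public static void main(String[] args) throws KeyGenException {
        int[] lens = { 8, 8, 16 };  // three dimensions (assumed: lengths in bits)
        int[] dimSplit = { 1, 2 };  // block 0 holds 1 dimension, block 1 holds 2
        MultiDimKeyVarLengthVariableSplitGenerator gen =
            new MultiDimKeyVarLengthVariableSplitGenerator(lens, dimSplit);

        byte[][] blocks = gen.generateAndSplitKey(new long[] { 3L, 5L, 300L });
        long[] roundTrip = gen.getKeyArray(blocks);  // reassemble and decode
        System.out.println(blocks.length + " blocks, first dim = " + roundTrip[0]);
      }
    }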

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/DirectDictionaryGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/DirectDictionaryGenerator.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/DirectDictionaryGenerator.java
new file mode 100644
index 0000000..595760a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/DirectDictionaryGenerator.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.keygenerator.directdictionary;
+
+/**
+ * The interface provides the methods to generate a dictionary key
+ * and to get the actual value back from the dictionary key for a direct dictionary column.
+ */
+public interface DirectDictionaryGenerator {
+
+  /**
+   * The method generates and returns the dictionary / surrogate key for a direct dictionary column
+   *
+   * @param member The member string value
+   * @return returns dictionary/ surrogate value
+   */
+  int generateDirectSurrogateKey(String member);
+
+  /**
+   * The method returns the actual value of the requested dictionary / surrogate
+   *
+   * @param key
+   * @return dictionary actual member
+   */
+  Object getValueFromSurrogate(int key);
+
+  /**
+   * The method generates and returns the dictionary / surrogate key for a direct dictionary
+   * column. This method is called while executing filter queries to get direct surrogate members.
+   * Currently the query engine layer only supports the yyyy-MM-dd HH:mm:ss date format, no matter
+   * in which format the data has been stored, so while retrieving the direct surrogate value for
+   * a filter member it should first be converted to a date in the above format and then the
+   * timestamp retrieved.
+   *
+   * @param memberStr The member string value
+   * @param format    The date format of the member value
+   * @return returns dictionary / surrogate value
+   */
+  int generateDirectSurrogateKey(String memberStr, String format);
+
+}
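
A minimal, hedged implementation of this interface: a toy generator that maps
non-negative integer strings straight to surrogate keys, reusing the convention (seen in
TimeStampDirectDictionaryGenerator below) that key 1 stands for null and real values
start at 2:

    import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;

    public class IntegerDirectDictionaryGenerator implements DirectDictionaryGenerator {

      @Override public int generateDirectSurrogateKey(String member) {
        try {
          return Integer.parseInt(member) + 2;  // keys 0 and 1 are reserved
        } catch (NumberFormatException e) {
          return 1;  // 1 stands for null / unparseable input
        }
      }

      @Override public int generateDirectSurrogateKey(String memberStr, String format) {
        return generateDirectSurrogateKey(memberStr);  // format is irrelevant for integers
      }

      @Override public Object getValueFromSurrogate(int key) {
        return key == 1 ? null : key - 2;
      }
    }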

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/DirectDictionaryKeyGeneratorFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/DirectDictionaryKeyGeneratorFactory.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/DirectDictionaryKeyGeneratorFactory.java
new file mode 100644
index 0000000..b038789
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/DirectDictionaryKeyGeneratorFactory.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.keygenerator.directdictionary;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampDirectDictionaryGenerator;
+
+/**
+ * Factory for DirectDictionary Key generator
+ */
+public final class DirectDictionaryKeyGeneratorFactory {
+  /**
+   * private constructor
+   */
+  private DirectDictionaryKeyGeneratorFactory() {
+
+  }
+
+  /**
+   * The method returns the DirectDictionaryGenerator for a direct dictionary
+   * column, based on its dataType
+   *
+   * @param dataType DataType
+   * @return the generator instance
+   */
+  public static DirectDictionaryGenerator getDirectDictionaryGenerator(DataType dataType) {
+    DirectDictionaryGenerator directDictionaryGenerator = null;
+    switch (dataType) {
+      case TIMESTAMP:
+        directDictionaryGenerator = TimeStampDirectDictionaryGenerator.instance;
+        break;
+      default:
+
+    }
+    return directDictionaryGenerator;
+  }
+}
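
A hedged end-to-end sketch: obtain the generator through the factory and round-trip a
value. It assumes the default yyyy-MM-dd HH:mm:ss timestamp format is in effect; note
that getValueFromSurrogate in the generator below returns the timestamp scaled by 1000L:

    import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
    import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
    import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;

    public class DirectDictionaryFactorySketch {
      public static void main(String[] args) {
        DirectDictionaryGenerator gen =
            DirectDictionaryKeyGeneratorFactory.getDirectDictionaryGenerator(DataType.TIMESTAMP);
        int key = gen.generateDirectSurrogateKey("2016-08-15 07:08:46");
        Object restored = gen.getValueFromSurrogate(key);  // millis * 1000L per the code below
        System.out.println(key + " -> " + restored);
      }
    }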

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampDirectDictionaryGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampDirectDictionaryGenerator.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampDirectDictionaryGenerator.java
new file mode 100644
index 0000000..7b8e49f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampDirectDictionaryGenerator.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.keygenerator.directdictionary.timestamp;
+
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
+import org.apache.carbondata.core.util.CarbonProperties;
+
+import static org.apache.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampGranularityConstants.TIME_GRAN_DAY;
+import static org.apache.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampGranularityConstants.TIME_GRAN_HOUR;
+import static org.apache.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampGranularityConstants.TIME_GRAN_MIN;
+import static org.apache.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampGranularityConstants.TIME_GRAN_SEC;
+
+/**
+ * The class provides the method to generate dictionary key and getting the actual value from
+ * the dictionaryKey for direct dictionary column for TIMESTAMP type.
+ */
+public class TimeStampDirectDictionaryGenerator implements DirectDictionaryGenerator {
+
+  private TimeStampDirectDictionaryGenerator() {
+
+  }
+
+  public static TimeStampDirectDictionaryGenerator instance =
+      new TimeStampDirectDictionaryGenerator();
+
+  /**
+   * The value of 1 unit of the SECOND, MINUTE, HOUR, or DAY in millis.
+   */
+  public static final long granularityFactor;
+  /**
+   * The date timestamp to be considered as the start date for calculating the timestamp.
+   * Java counts the number of milliseconds from the start of "January 1, 1970"; this property
+   * customizes that starting point, for example to "January 1, 2000".
+   */
+  public static final long cutOffTimeStamp;
+  /**
+   * Logger instance
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(TimeStampDirectDictionaryGenerator.class.getName());
+
+  /**
+   * initialization block for granularityFactor and cutOffTimeStamp
+   */
+  static {
+    String cutOffTimeStampString = CarbonProperties.getInstance()
+        .getProperty(TimeStampGranularityConstants.CARBON_CUTOFF_TIMESTAMP);
+    String timeGranularity = CarbonProperties.getInstance()
+        .getProperty(TimeStampGranularityConstants.CARBON_TIME_GRANULARITY, TIME_GRAN_SEC);
+    long granularityFactorLocal = 1000;
+    switch (timeGranularity) {
+      case TIME_GRAN_SEC:
+        granularityFactorLocal = TimeStampGranularityTypeValue.MILLIS_SECONDS.getValue();
+        break;
+      case TIME_GRAN_MIN:
+        granularityFactorLocal = TimeStampGranularityTypeValue.MILLIS_MINUTE.getValue();
+        break;
+      case TIME_GRAN_HOUR:
+        granularityFactorLocal = TimeStampGranularityTypeValue.MILLIS_HOUR.getValue();
+        break;
+      case TIME_GRAN_DAY:
+        granularityFactorLocal = TimeStampGranularityTypeValue.MILLIS_DAY.getValue();
+        break;
+      default:
+        granularityFactorLocal = 1000;
+    }
+    long cutOffTimeStampLocal;
+    if (null == cutOffTimeStampString) {
+      cutOffTimeStampLocal = -1;
+    } else {
+      try {
+        SimpleDateFormat timeParser = new SimpleDateFormat(CarbonProperties.getInstance()
+            .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+                CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
+        timeParser.setLenient(false);
+        Date dateToStr = timeParser.parse(cutOffTimeStampString);
+        cutOffTimeStampLocal = dateToStr.getTime();
+      } catch (ParseException e) {
+        LOGGER.warn("Cannot convert " + cutOffTimeStampString
+            + " to Time/Long type value. Value considered for cutOffTimeStamp is -1. " + e
+            .getMessage());
+        cutOffTimeStampLocal = -1;
+      }
+    }
+    granularityFactor = granularityFactorLocal;
+    cutOffTimeStamp = cutOffTimeStampLocal;
+  }
+
+  /**
+   * Takes the member string as input, converts it using the configured
+   * timestamp format, and returns the dictionary key.
+   *
+   * @param memberStr date string to convert
+   * @return dictionary value
+   */
+  @Override public int generateDirectSurrogateKey(String memberStr) {
+    SimpleDateFormat timeParser = new SimpleDateFormat(CarbonProperties.getInstance()
+        .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+            CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
+    timeParser.setLenient(false);
+    if (null == memberStr || memberStr.trim().isEmpty() || memberStr
+        .equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
+      return 1;
+    }
+    return getDirectSurrogateForMember(memberStr, timeParser);
+  }
+
+  /**
+   * Takes the member string and an explicit date format as input and returns
+   * the dictionary key.
+   *
+   * @param memberStr date string to convert
+   * @param format    date format to parse with; if null, the member is treated
+   *                  as a long timestamp value
+   * @return dictionary value
+   */
+  public int generateDirectSurrogateKey(String memberStr, String format) {
+    if (null == format) {
+      return generateDirectSurrogateKeyForNonTimestampType(memberStr);
+    } else {
+      SimpleDateFormat timeParser = new SimpleDateFormat(format);
+      timeParser.setLenient(false);
+      if (null == memberStr || memberStr.trim().isEmpty() || memberStr
+          .equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
+        return 1;
+      }
+      return getDirectSurrogateForMember(memberStr, timeParser);
+    }
+  }
+
+  private int getDirectSurrogateForMember(String memberStr, SimpleDateFormat timeParser) {
+    Date dateToStr = null;
+    try {
+      dateToStr = timeParser.parse(memberStr);
+    } catch (ParseException e) {
+      LOGGER.debug(
+          "Cannot convert " + memberStr + " to Time/Long type value. Value considered as null." + e
+              .getMessage());
+      dateToStr = null;
+    }
+    // key value 1 is reserved for null or empty dates; generateKey adds 2 for valid dates
+    if (null == dateToStr) {
+      return 1;
+    } else {
+      return generateKey(dateToStr.getTime());
+    }
+  }
+
+  /**
+   * Takes the dictionary key as input and returns the actual member value.
+   *
+   * @param key dictionary key
+   * @return actual timestamp value in microseconds, or null for the reserved key 1
+   */
+  @Override public Object getValueFromSurrogate(int key) {
+    if (key == 1) {
+      return null;
+    }
+    long timeStamp = 0;
+    if (cutOffTimeStamp >= 0) {
+      timeStamp = ((key - 2) * granularityFactor + cutOffTimeStamp);
+    } else {
+      timeStamp = (key - 2) * granularityFactor;
+    }
+    return timeStamp * 1000L;
+  }
+
+  private int generateDirectSurrogateKeyForNonTimestampType(String memberStr) {
+    long timeValue = -1;
+    try {
+      timeValue = Long.valueOf(memberStr) / 1000;
+    } catch (NumberFormatException e) {
+      LOGGER.debug(
+          "Cannot convert " + memberStr + " to Long type value. Value considered as null. " + e
+              .getMessage());
+    }
+    if (timeValue == -1) {
+      return 1;
+    } else {
+      return generateKey(timeValue);
+    }
+  }
+
+  private int generateKey(long timeValue) {
+    if (cutOffTimeStamp >= 0) {
+      int keyValue = (int) ((timeValue - cutOffTimeStamp) / granularityFactor);
+      return keyValue < 0 ? 1 : keyValue + 2;
+    } else {
+      int keyValue = (int) (timeValue / granularityFactor);
+      return keyValue < 0 ? 1 : keyValue + 2;
+    }
+  }
+
+}
\ No newline at end of file
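
For context, a minimal round-trip sketch of the generator above (not part of this
patch). It assumes the default SECOND granularity, no carbon.cutOffTimestamp, and
that the member string matches the configured CARBON_TIMESTAMP_FORMAT; the date
literal is illustrative.

    // Hypothetical usage sketch; property values and the date literal are assumptions.
    TimeStampDirectDictionaryGenerator generator = TimeStampDirectDictionaryGenerator.instance;
    int key = generator.generateDirectSurrogateKey("2016-08-15 07:08:46");
    // With SECOND granularity and no cutoff: key == millisSinceEpoch / 1000 + 2
    // (key 1 is reserved for null/empty/unparsable members).
    Object micros = generator.getValueFromSurrogate(key); // Long, microseconds (millis * 1000)

Note that getValueFromSurrogate multiplies by 1000L, so callers receive
microseconds rather than milliseconds.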

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityConstants.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityConstants.java
new file mode 100644
index 0000000..e897843
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityConstants.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.keygenerator.directdictionary.timestamp;
+
+/**
+ * Constants related to timestamp conversion
+ */
+public interface TimeStampGranularityConstants {
+
+  /**
+   * The property to set the date to be used as the start date when calculating the
+   * dictionary key. Java counts milliseconds from the start of "January 1, 1970";
+   * this property customizes that starting point, for example to "January 1, 2000".
+   */
+  String CARBON_CUTOFF_TIMESTAMP = "carbon.cutOffTimestamp";
+  /**
+   * The property to set the timestamp (i.e. millisecond) granularity to the
+   * SECOND, MINUTE, HOUR, or DAY level
+   */
+  String CARBON_TIME_GRANULARITY = "carbon.timegranularity";
+
+  /**
+   * Second level key
+   */
+  String TIME_GRAN_SEC = "SECOND";
+  /**
+   * Minute level key
+   */
+  String TIME_GRAN_MIN = "MINUTE";
+  /**
+   * Hour level key
+   */
+  String TIME_GRAN_HOUR = "HOUR";
+  /**
+   * Day level key
+   */
+  String TIME_GRAN_DAY = "DAY";
+}
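
A hedged configuration sketch for these keys (not part of this patch); it assumes
CarbonProperties exposes an addProperty(key, value) setter, and both values shown
are illustrative. The static initializer in TimeStampDirectDictionaryGenerator
reads the properties once at class load, so they must be set beforehand.

    CarbonProperties props = CarbonProperties.getInstance();
    // custom epoch: dictionary keys count from this instant instead of 1970-01-01
    props.addProperty(TimeStampGranularityConstants.CARBON_CUTOFF_TIMESTAMP, "2000-01-01 00:00:00");
    // one dictionary key per DAY instead of the default SECOND
    props.addProperty(TimeStampGranularityConstants.CARBON_TIME_GRANULARITY,
        TimeStampGranularityConstants.TIME_GRAN_DAY);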

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityTypeValue.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityTypeValue.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityTypeValue.java
new file mode 100644
index 0000000..3b50a5c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/directdictionary/timestamp/TimeStampGranularityTypeValue.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.keygenerator.directdictionary.timestamp;
+
+/**
+ * Enum constants holding the value in milliseconds of one second, minute, hour, and day
+ */
+public enum TimeStampGranularityTypeValue {
+  /**
+   * 1 second value in ms
+   */
+  MILLIS_SECONDS(1000),
+  /**
+   * 1 minute value in ms
+   */
+  MILLIS_MINUTE(1000 * 60),
+  /**
+   * 1 hour value in ms
+   */
+  MILLIS_HOUR(1000 * 60 * 60),
+  /**
+   * 1 day value in ms
+   */
+  MILLIS_DAY(1000 * 60 * 60 * 24);
+
+  /**
+   * enum constant value
+   */
+  private final long value;
+
+  /**
+   * constructor of enum constant
+   *
+   * @param value the milliseconds value of this granularity
+   */
+  private TimeStampGranularityTypeValue(long value) {
+    this.value = value;
+  }
+
+  /**
+   * @return the value of the enum constant
+   */
+  public long getValue() {
+    return this.value;
+  }
+
+}
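
A one-line illustration of the enum (not part of this patch), spelling out the
arithmetic the constants encode:

    long dayMillis = TimeStampGranularityTypeValue.MILLIS_DAY.getValue(); // 86400000 = 1000 * 60 * 60 * 24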

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/factory/KeyGeneratorFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/factory/KeyGeneratorFactory.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/factory/KeyGeneratorFactory.java
new file mode 100644
index 0000000..ab3e7ac
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/factory/KeyGeneratorFactory.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.keygenerator.factory;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.keygenerator.KeyGenerator;
+import org.apache.carbondata.core.keygenerator.mdkey.MultiDimKeyVarLengthGenerator;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+public final class KeyGeneratorFactory {
+  private KeyGeneratorFactory() {
+
+  }
+
+  public static KeyGenerator getKeyGenerator(int[] dimension) {
+    int[] incrementedCardinality;
+    boolean isFullyFilled =
+        Boolean.parseBoolean(CarbonCommonConstants.IS_FULLY_FILLED_BITS_DEFAULT_VALUE);
+    if (!isFullyFilled) {
+      incrementedCardinality = CarbonUtil.getIncrementedCardinality(dimension);
+    } else {
+      incrementedCardinality = CarbonUtil.getIncrementedCardinalityFullyFilled(dimension);
+    }
+    return new MultiDimKeyVarLengthGenerator(incrementedCardinality);
+  }
+
+  /**
+   * @param dimCardinality dimension cardinality
+   * @param columnSplits   number of columns in each block
+   * @return key generator
+   */
+  public static KeyGenerator getKeyGenerator(int[] dimCardinality, int[] columnSplits) {
+    int[] dimsBitLens = CarbonUtil.getDimensionBitLength(dimCardinality, columnSplits);
+
+    return new MultiDimKeyVarLengthGenerator(dimsBitLens);
+  }
+}
+
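
A short usage sketch for the factory (not part of this patch); the cardinalities
are illustrative:

    // Hypothetical cardinalities for three dictionary-encoded dimensions.
    int[] dimCardinality = new int[] { 100, 1000, 50 };
    KeyGenerator keyGen = KeyGeneratorFactory.getKeyGenerator(dimCardinality);
    int mdKeyWidth = keyGen.getKeySizeInBytes(); // width in bytes of the packed MDKey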

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/AbstractKeyGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/AbstractKeyGenerator.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/AbstractKeyGenerator.java
new file mode 100644
index 0000000..76128d4
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/AbstractKeyGenerator.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.keygenerator.mdkey;
+
+import org.apache.carbondata.core.keygenerator.KeyGenerator;
+
+public abstract class AbstractKeyGenerator implements KeyGenerator {
+
+  private static final long serialVersionUID = -6675293078575359769L;
+
+  @Override public int compare(byte[] byte1, byte[] byte2) {
+    // Short circuit equal case
+    if (byte1 == byte2) {
+      return 0;
+    }
+    // Bring WritableComparator code local
+    int i = 0;
+    int j = 0;
+    for (; i < byte1.length && j < byte2.length; i++, j++) {
+      int a = (byte1[i] & 0xff);
+      int b = (byte2[j] & 0xff);
+      if (a != b) {
+        return a - b;
+      }
+    }
+    return 0;
+  }
+
+  public int compare(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2,
+      int length2) {
+    length1 += offset1;
+    length2 += offset2;
+    // Bring WritableComparator code local
+    for (; offset1 < length1 && offset2 < length2; offset1++, offset2++) {
+      int a = (buffer1[offset1] & 0xff);
+      int b = (buffer2[offset2] & 0xff);
+      if (a != b) {
+        return a - b;
+      }
+    }
+    return 0;
+  }
+
+  @Override public void setProperty(Object key, Object value) {
+    // No implementation required.
+  }
+
+  @Override public int getKeySizeInBytes() {
+    return 0;
+  }
+
+  @Override public int[] getKeyByteOffsets(int index) {
+    return null;
+  }
+
+  @Override public int getDimCount() {
+    return 0;
+  }
+
+}
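
Both compare methods above treat key bytes as unsigned (the & 0xff), so 0xFF sorts
after 0x01 even though it is negative as a signed Java byte; a shorter key that is
a prefix of a longer one compares equal. A small sketch (not part of this patch),
where keyGen is any concrete subclass such as MultiDimKeyVarLengthGenerator:

    byte[] a = new byte[] { (byte) 0xFF }; // 255 unsigned, -1 signed
    byte[] b = new byte[] { 0x01 };
    // keyGen.compare(a, b) > 0: unsigned byte ordering, not signed ordering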

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/Bits.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/Bits.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/Bits.java
new file mode 100644
index 0000000..2cdbc80
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/Bits.java
@@ -0,0 +1,327 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.keygenerator.mdkey;
+
+import java.io.Serializable;
+import java.util.Arrays;
+
+public class Bits implements Serializable {
+
+  /**
+   * Bits MAX_LENGTH
+   */
+  private static final int MAX_LENGTH = 63;
+  private static final int LONG_LENGTH = 64;
+  /**
+   * serialVersionUID.
+   */
+  private static final long serialVersionUID = 1555114921503304849L;
+  /**
+   * LONG_MAX.
+   */
+  private static final long LONG_MAX = 0x7fffffffffffffffL;
+  /**
+   * length.
+   */
+  private int length = 100;
+  /**
+   * lens.
+   */
+  private int[] lens;
+  /**
+   * wsize.
+   */
+  private int wsize;
+  /**
+   * byteSize.
+   */
+  private int byteSize;
+
+  public Bits(int[] lens) {
+    this.lens = lens;
+    this.length = getTotalLength(lens);
+
+    wsize = length / LONG_LENGTH;
+    byteSize = length / 8;
+
+    if (length % LONG_LENGTH != 0) {
+      wsize++;
+    }
+
+    if (length % 8 != 0) {
+      byteSize++;
+    }
+  }
+
+  public int getByteSize() {
+    return byteSize;
+  }
+
+  private int getTotalLength(int[] lens) {
+    int tLen = 0;
+    for (int len : lens) {
+      tLen += len;
+    }
+    return tLen;
+  }
+
+  public int getDimCount() {
+    return lens.length;
+  }
+
+  /**
+   * Returns the start and end byte offsets of a dimension in the MDKey,
+   * as int[] {start, end}.
+   */
+  public int[] getKeyByteOffsets(int index) {
+    int prefixPaddingBits = length % 8 == 0 ? 0 : (8 - length % 8);
+
+    int priorLen = prefixPaddingBits;
+    int start = 0;
+    int end = 0;
+
+    // Calculate prior length for all previous keys
+    for (int i = 0; i < index; i++) {
+      priorLen += lens[i];
+    }
+
+    // Start
+    start = priorLen / 8;
+
+    int tillKeyLength = priorLen + lens[index];
+
+    // End key
+    end = (tillKeyLength) / 8;
+
+    // Consider if end is the last bit. No need to include the next byte.
+    if (tillKeyLength % 8 == 0) {
+      end--;
+    }
+
+    return new int[] { start, end };
+  }
+
+  protected long[] get(long[] keys) {
+    long[] words = new long[wsize];
+    int ll = 0;
+    for (int i = lens.length - 1; i >= 0; i--) {
+
+      long val = keys[i];
+
+      int idx = ll >> 6; // divide by 64 to get the word index
+      int position = ll & 0x3f; // bit position within the word
+      // mask the value so it cannot exceed the bit width of this dimension
+      val = val & (LONG_MAX >> (MAX_LENGTH - lens[i]));
+      long mask = (val << position);
+      long word = words[idx];
+      words[idx] = (word | mask);
+      ll += lens[i];
+
+      int nextIndex = ll >> 6;// This is divide by 64
+
+      if (nextIndex != idx) {
+        int consideredBits = lens[i] - ll & 0x3f;
+        // check for spill over only if not all of the bits were consumed
+        if (consideredBits < lens[i]) {
+          mask = (val >> (lens[i] - ll & 0x3f));
+          word = words[nextIndex];
+          words[nextIndex] = (word | mask);
+        }
+      }
+
+    }
+
+    return words;
+  }
+
+  protected long[] get(int[] keys) {
+    long[] words = new long[wsize];
+    int ll = 0;
+    for (int i = lens.length - 1; i >= 0; i--) {
+
+      long val = keys[i];
+
+      int index = ll >> 6; // divide by 64 to get the word index
+      int pos = ll & 0x3f; // bit position within the word
+      // mask the value so it cannot exceed the bit width of this dimension
+      val = val & (LONG_MAX >> (MAX_LENGTH - lens[i]));
+      long mask = (val << pos);
+      long word = words[index];
+      words[index] = (word | mask);
+      ll += lens[i];
+
+      int nextIndex = ll >> 6;// This is divide by 64
+
+      if (nextIndex != index) {
+        int consideredBits = lens[i] - ll & 0x3f;
+        // check for spill over only if not all of the bits were consumed
+        if (consideredBits < lens[i]) {
+          mask = (val >> (lens[i] - ll & 0x3f));
+          word = words[nextIndex];
+          words[nextIndex] = (word | mask);
+        }
+      }
+
+    }
+
+    return words;
+  }
+
+  private long[] getArray(long[] words) {
+    long[] vals = new long[lens.length];
+    int ll = 0;
+    for (int i = lens.length - 1; i >= 0; i--) {
+
+      int index = ll >> 6;
+      int pos = ll & 0x3f;
+      long val = words[index];
+      long mask = (LONG_MAX >>> (MAX_LENGTH - lens[i]));
+      mask = mask << pos;
+      vals[i] = (val & mask);
+      vals[i] >>>= pos;
+      ll += lens[i];
+
+      int nextIndex = ll >> 6;
+      if (nextIndex != index) {
+        pos = ll & 0x3f;
+        // pos != 0 means bits of this key spilled over into the next word
+        if (pos != 0) {
+          mask = (LONG_MAX >>> (MAX_LENGTH - pos));
+          val = words[nextIndex];
+          vals[i] = vals[i] | ((val & mask) << (lens[i] - pos));
+        }
+      }
+    }
+    return vals;
+  }
+
+  public byte[] getBytes(long[] keys) {
+
+    long[] words = get(keys);
+
+    return getBytesVal(words);
+  }
+
+  private byte[] getBytesVal(long[] words) {
+    int length = 8;
+    byte[] bytes = new byte[byteSize];
+
+    int l = byteSize - 1;
+    for (int i = 0; i < words.length; i++) {
+      long val = words[i];
+
+      for (int j = length - 1; j > 0 && l > 0; j--) {
+        bytes[l] = (byte) val;
+        val >>>= 8;
+        l--;
+      }
+      bytes[l] = (byte) val;
+      l--;
+    }
+    return bytes;
+  }
+
+  public byte[] getBytes(int[] keys) {
+
+    long[] words = get(keys);
+
+    return getBytesVal(words);
+  }
+
+  public long[] getKeyArray(byte[] key, int offset) {
+
+    int length = 8;
+    int ls = byteSize;
+    long[] words = new long[wsize];
+    for (int i = 0; i < words.length; i++) {
+      long l = 0;
+      ls -= 8;
+      int m = 0;
+      if (ls < 0) {
+        m = ls + length;
+        ls = 0;
+      } else {
+        m = ls + 8;
+      }
+      for (int j = ls; j < m; j++) {
+        l <<= 8;
+        l ^= key[j + offset] & 0xFF;
+      }
+      words[i] = l;
+    }
+
+    return getArray(words);
+
+  }
+
+  public long[] getKeyArray(byte[] key, int[] maskByteRanges) {
+
+    int length = 8;
+    int ls = byteSize;
+    long[] words = new long[wsize];
+    for (int i = 0; i < words.length; i++) {
+      long l = 0;
+      ls -= 8;
+      int m2 = 0;
+      if (ls < 0) {
+        m2 = ls + length;
+        ls = 0;
+      } else {
+        m2 = ls + 8;
+      }
+      if (maskByteRanges == null) {
+        for (int j = ls; j < m2; j++) {
+          l <<= 8;
+          l ^= key[j] & 0xFF;
+        }
+      } else {
+        for (int j = ls; j < m2; j++) {
+          l <<= 8;
+          if (maskByteRanges[j] != -1) {
+            l ^= key[maskByteRanges[j]] & 0xFF;
+          }
+        }
+      }
+      words[i] = l;
+    }
+
+    return getArray(words);
+
+  }
+
+  @Override public boolean equals(Object obj) {
+    if (obj instanceof Bits) {
+      Bits other = (Bits) obj;
+      return Arrays.equals(lens, other.lens);
+    }
+    return false;
+  }
+
+  @Override public int hashCode() {
+    return Arrays.hashCode(lens);
+  }
+}
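
A round-trip sketch of the bit packer above (not part of this patch), assuming
three dimensions of 3, 5, and 10 bits; 18 bits total pads to a 3-byte key:

    Bits bits = new Bits(new int[] { 3, 5, 10 });
    byte[] packed = bits.getBytes(new long[] { 5L, 20L, 700L }); // each value fits its bit width
    // packed.length == bits.getByteSize() == 3
    long[] unpacked = bits.getKeyArray(packed, 0); // {5, 20, 700}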

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/MultiDimKeyVarLengthGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/MultiDimKeyVarLengthGenerator.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/MultiDimKeyVarLengthGenerator.java
new file mode 100644
index 0000000..6f8dd7d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/MultiDimKeyVarLengthGenerator.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.keygenerator.mdkey;
+
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+
+public class MultiDimKeyVarLengthGenerator extends AbstractKeyGenerator {
+
+  private static final long serialVersionUID = 9134778127271586515L;
+  /**
+   *
+   */
+  protected int[][] byteRangesForKeys;
+  private Bits bits;
+  private int startAndEndKeySizeWithPrimitives;
+
+  public MultiDimKeyVarLengthGenerator(int[] lens) {
+    bits = new Bits(lens);
+    byteRangesForKeys = new int[lens.length][];
+    int keys = lens.length;
+    for (int i = 0; i < keys; i++) {
+      byteRangesForKeys[i] = bits.getKeyByteOffsets(i);
+    }
+  }
+
+  @Override public byte[] generateKey(long[] keys) throws KeyGenException {
+
+    return bits.getBytes(keys);
+  }
+
+  @Override public byte[] generateKey(int[] keys) throws KeyGenException {
+
+    return bits.getBytes(keys);
+  }
+
+  @Override public long[] getKeyArray(byte[] key) {
+
+    return bits.getKeyArray(key, 0);
+  }
+
+  @Override public long[] getKeyArray(byte[] key, int offset) {
+
+    return bits.getKeyArray(key, offset);
+  }
+
+  @Override public long getKey(byte[] key, int index) {
+
+    return bits.getKeyArray(key, 0)[index];
+  }
+
+  public int getKeySizeInBytes() {
+    return bits.getByteSize();
+  }
+
+  @Override public long[] getSubKeyArray(byte[] key, int index, int size) {
+    if (index < 0 || size == 0) {
+      return null;
+    }
+    long[] keys = bits.getKeyArray(key, 0);
+    long[] rtn = new long[size];
+    System.arraycopy(keys, index, rtn, 0, size);
+    return rtn;
+  }
+
+  @Override public int[] getKeyByteOffsets(int index) {
+    return byteRangesForKeys[index];
+  }
+
+  @Override public int getDimCount() {
+
+    return bits.getDimCount();
+  }
+
+  @Override public boolean equals(Object obj) {
+    if (obj instanceof MultiDimKeyVarLengthGenerator) {
+      MultiDimKeyVarLengthGenerator other = (MultiDimKeyVarLengthGenerator) obj;
+      return bits.equals(other.bits);
+    }
+
+    return false;
+  }
+
+  @Override public int hashCode() {
+    return bits.hashCode();
+  }
+
+  @Override public long[] getKeyArray(byte[] key, int[] maskedByteRanges) {
+    return bits.getKeyArray(key, maskedByteRanges);
+  }
+
+  @Override public int getStartAndEndKeySizeWithOnlyPrimitives() {
+    return startAndEndKeySizeWithPrimitives;
+  }
+
+  @Override
+  public void setStartAndEndKeySizeWithOnlyPrimitives(int startAndEndKeySizeWithPrimitives) {
+    this.startAndEndKeySizeWithPrimitives = startAndEndKeySizeWithPrimitives;
+  }
+
+}
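
A hedged round trip through the generator (not part of this patch). Note the
constructor takes bit lengths per dimension, not cardinalities;
KeyGeneratorFactory derives bit lengths from cardinalities for you.

    KeyGenerator keyGen = new MultiDimKeyVarLengthGenerator(new int[] { 8, 8, 16 });
    try {
      byte[] mdKey = keyGen.generateKey(new long[] { 1L, 200L, 40000L }); // 4-byte key
      long[] back = keyGen.getKeyArray(mdKey); // {1, 200, 40000}
    } catch (KeyGenException e) {
      // declared by the interface; this implementation path does not throw it
    }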

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/NumberCompressor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/NumberCompressor.java b/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/NumberCompressor.java
new file mode 100644
index 0000000..b83cf71
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/keygenerator/mdkey/NumberCompressor.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.keygenerator.mdkey;
+
+/**
+ * It compresses the data as per max cardinality. It takes only the required bits for each key.
+ */
+public class NumberCompressor {
+
+  /**
+   * Bits MAX_LENGTH
+   */
+  private static final int MAX_LENGTH = 63;
+
+  private static final int LONG_LENGTH = 64;
+
+  private static final int BYTE_LENGTH = 8;
+
+  /**
+   * LONG_MAX.
+   */
+  private static final long LONG_MAX = 0x7fffffffffffffffL;
+
+  private byte bitsLength;
+
+  public NumberCompressor(int cardinality) {
+    bitsLength = (byte) Long.toBinaryString(cardinality).length();
+  }
+
+  public byte[] compress(int[] keys) {
+    int[] sizes = getWordsAndByteSize(keys.length);
+    long[] words = get(keys, sizes[0]);
+
+    return getByteValues(sizes, words);
+  }
+
+  private byte[] getByteValues(int[] sizes, long[] words) {
+    byte[] bytes = new byte[sizes[1]];
+
+    int l = sizes[1] - 1;
+    for (int i = 0; i < words.length; i++) {
+      long val = words[i];
+
+      for (int j = BYTE_LENGTH - 1; j > 0 && l > 0; j--) {
+        bytes[l] = (byte) val;
+        val >>>= 8;
+        l--;
+      }
+      bytes[l] = (byte) val;
+      l--;
+    }
+    return bytes;
+  }
+
+  protected long[] get(int[] keys, int wsize) {
+    long[] words = new long[wsize];
+    int ll = 0;
+    int index = 0;
+    int pos = 0;
+    int nextIndex = 0;
+    for (int i = keys.length - 1; i >= 0; i--) {
+
+      long val = keys[i];
+
+      index = ll >> 6; // divide by 64 to get the word index
+      pos = ll & 0x3f; // bit position within the word
+      long mask = (val << pos);
+      long word = words[index];
+      words[index] = (word | mask);
+      ll += bitsLength;
+
+      nextIndex = ll >> 6;// This is divide by 64
+
+      if (nextIndex != index) {
+        int consideredBits = bitsLength - ll & 0x3f;
+        // check for spill over only if not all of the bits were consumed
+        if (consideredBits < bitsLength) {
+          mask = (val >> (bitsLength - ll & 0x3f));
+          words[nextIndex] |= mask;
+        }
+      }
+
+    }
+    return words;
+  }
+
+  protected long[] get(byte[] keys, int wsize) {
+    long[] words = new long[wsize];
+    int ll = 0;
+    long val = 0L;
+    // NOTE: as written, the inner loop below never executes (j starts at i + 1,
+    // which is already greater than size == i), so i is never decremented and
+    // this overload cannot terminate for non-empty input.
+    for (int i = keys.length - 1; i >= 0; ) {
+
+      int size = i;
+      val = 0L;
+      for (int j = i + 1; j <= size; ) {
+        val <<= BYTE_LENGTH;
+        val ^= keys[j++] & 0xFF;
+        i--;
+      }
+      int index = ll >> 6;// divide by 64 to get the new word index
+      words[index] |= (val << (ll & 0x3f));
+      ll += bitsLength;
+
+      int nextIndex = ll >> 6;// This is divide by 64
+
+      if (nextIndex != index) {
+        int consideredBits = bitsLength - ll & 0x3f;
+        // check for spill over only if not all of the bits were consumed
+        if (consideredBits < bitsLength) {
+          words[nextIndex] |= (val >> (bitsLength - ll & 0x3f));
+        }
+      }
+
+    }
+    return words;
+  }
+
+  public int[] unCompress(byte[] key) {
+    int ls = key.length;
+    int arrayLength = (ls * BYTE_LENGTH) / bitsLength;
+    long[] words = new long[getWordsSizeFromBytesSize(ls)];
+    unCompressVal(key, ls, words);
+    return getArray(words, arrayLength);
+  }
+
+  private void unCompressVal(byte[] key, int ls, long[] words) {
+    for (int i = 0; i < words.length; i++) {
+      long l = 0;
+      ls -= BYTE_LENGTH;
+      int m = 0;
+      if (ls < 0) {
+        m = ls + BYTE_LENGTH;
+        ls = 0;
+      } else {
+        m = ls + BYTE_LENGTH;
+      }
+      for (int j = ls; j < m; j++) {
+        l <<= BYTE_LENGTH;
+        l ^= key[j] & 0xFF;
+      }
+      words[i] = l;
+    }
+  }
+
+  private int[] getArray(long[] words, int arrayLength) {
+    int[] vals = new int[arrayLength];
+    int ll = 0;
+    long globalMask = LONG_MAX >>> (MAX_LENGTH - bitsLength);
+    for (int i = arrayLength - 1; i >= 0; i--) {
+
+      int index = ll >> 6;
+      int pos = ll & 0x3f;
+      long val = words[index];
+      long mask = globalMask << pos;
+      long value = (val & mask) >>> pos;
+      ll += bitsLength;
+
+      int nextIndex = ll >> 6;
+      if (nextIndex != index) {
+        pos = ll & 0x3f;
+        // pos != 0 means bits of this key spilled over into the next word
+        if (pos != 0) {
+          mask = (LONG_MAX >>> (MAX_LENGTH - pos));
+          val = words[nextIndex];
+          value = value | ((val & mask) << (bitsLength - pos));
+        }
+      }
+      vals[i] = (int) value;
+    }
+    return vals;
+  }
+
+  private int[] getWordsAndByteSize(int arrayLength) {
+    int length = arrayLength * bitsLength;
+    int wsize = length / LONG_LENGTH;
+    int byteSize = length / BYTE_LENGTH;
+
+    if (length % LONG_LENGTH != 0) {
+      wsize++;
+    }
+
+    if (length % BYTE_LENGTH != 0) {
+      byteSize++;
+    }
+    return new int[] { wsize, byteSize };
+  }
+
+  private int getWordsSizeFromBytesSize(int byteSize) {
+    int wsize = byteSize / BYTE_LENGTH;
+    if (byteSize % BYTE_LENGTH != 0) {
+      wsize++;
+    }
+    return wsize;
+  }
+
+}
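
A compress/uncompress sketch (not part of this patch): with cardinality 1000,
Long.toBinaryString(1000).length() == 10, so each key takes 10 bits and eight
keys pack into 80 bits (10 bytes):

    NumberCompressor compressor = new NumberCompressor(1000);
    int[] keys = new int[] { 3, 999, 0, 512, 7, 42, 100, 1 };
    byte[] compressed = compressor.compress(keys);      // 10 bytes instead of 32
    int[] restored = compressor.unCompress(compressed); // equal to keys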

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/load/BlockDetails.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/load/BlockDetails.java b/core/src/main/java/org/apache/carbondata/core/load/BlockDetails.java
new file mode 100644
index 0000000..c3fd997
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/load/BlockDetails.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.load;
+
+import java.io.Serializable;
+
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+
+/**
+ * Holds block info (offset, length, file path, and host locations) for a data load.
+ */
+public class BlockDetails implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = 2293906691860002339L;
+  //block offset
+  private long blockOffset;
+  //block length
+  private long blockLength;
+  //file path which block belong to
+  private String filePath;
+  // locations where this block exists
+  private String[] locations;
+
+  public BlockDetails(String filePath, long blockOffset, long blockLength, String[] locations) {
+    this.filePath = filePath;
+    this.blockOffset = blockOffset;
+    this.blockLength = blockLength;
+    this.locations = locations;
+  }
+
+  public long getBlockOffset() {
+    return blockOffset;
+  }
+
+  public void setBlockOffset(long blockOffset) {
+    this.blockOffset = blockOffset;
+  }
+
+  public long getBlockLength() {
+    return blockLength;
+  }
+
+  public void setBlockLength(long blockLength) {
+    this.blockLength = blockLength;
+  }
+
+  public String getFilePath() {
+    return FileFactory.getUpdatedFilePath(filePath);
+  }
+
+  public void setFilePath(String filePath) {
+    this.filePath = filePath;
+  }
+
+  public String[] getLocations() {
+    return locations;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/load/LoadMetadataDetails.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/load/LoadMetadataDetails.java b/core/src/main/java/org/apache/carbondata/core/load/LoadMetadataDetails.java
new file mode 100644
index 0000000..7c58ae7
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/load/LoadMetadataDetails.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.load;
+
+import java.io.Serializable;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+public class LoadMetadataDetails implements Serializable {
+
+  private static final long serialVersionUID = 1106104914918491724L;
+  private String timestamp;
+  private String loadStatus;
+  private String loadName;
+  private String partitionCount;
+
+  /**
+   * LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(LoadMetadataDetails.class.getName());
+
+  // note: SimpleDateFormat is not thread-safe; this shared parser assumes serialized access
+  private static final SimpleDateFormat parser =
+      new SimpleDateFormat(CarbonCommonConstants.CARBON_TIMESTAMP);
+  /**
+   * Segment modification or deletion time stamp
+   */
+  private String modificationOrdeletionTimesStamp;
+  private String loadStartTime;
+
+  private String mergedLoadName;
+  /**
+   * visibility is used to determine whether the load is visible or not.
+   */
+  private String visibility = "true";
+
+  /**
+   * To know if the segment is a major compacted segment or not.
+   */
+  private String majorCompacted;
+
+  public String getPartitionCount() {
+    return partitionCount;
+  }
+
+  public void setPartitionCount(String partitionCount) {
+    this.partitionCount = partitionCount;
+  }
+
+  public String getTimestamp() {
+    return timestamp;
+  }
+
+  public void setTimestamp(String timestamp) {
+    this.timestamp = timestamp;
+  }
+
+  public String getLoadStatus() {
+    return loadStatus;
+  }
+
+  public void setLoadStatus(String loadStatus) {
+    this.loadStatus = loadStatus;
+  }
+
+  public String getLoadName() {
+    return loadName;
+  }
+
+  public void setLoadName(String loadName) {
+    this.loadName = loadName;
+  }
+
+  /**
+   * @return the modificationOrdeletionTimesStamp
+   */
+  public String getModificationOrdeletionTimesStamp() {
+    return modificationOrdeletionTimesStamp;
+  }
+
+  /**
+   * @param modificationOrdeletionTimesStamp the modificationOrdeletionTimesStamp to set
+   */
+  public void setModificationOrdeletionTimesStamp(String modificationOrdeletionTimesStamp) {
+    this.modificationOrdeletionTimesStamp = modificationOrdeletionTimesStamp;
+  }
+
+  /* (non-Javadoc)
+   * @see java.lang.Object#hashCode()
+   */
+  @Override public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((loadName == null) ? 0 : loadName.hashCode());
+    return result;
+  }
+
+  /* (non-Javadoc)
+   * @see java.lang.Object#equals(java.lang.Object)
+   */
+  @Override public boolean equals(Object obj) {
+    if (obj == null) {
+      return false;
+
+    }
+    if (!(obj instanceof LoadMetadataDetails)) {
+      return false;
+    }
+    LoadMetadataDetails other = (LoadMetadataDetails) obj;
+    if (loadName == null) {
+      if (other.loadName != null) {
+        return false;
+      }
+    } else if (!loadName.equals(other.loadName)) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * @return the startLoadTime
+   */
+  public String getLoadStartTime() {
+    return loadStartTime;
+  }
+
+  /**
+   * @return the load start time as a long value (microseconds)
+   */
+  public long getLoadStartTimeAsLong() {
+    return getTimeStamp(loadStartTime);
+  }
+
+  /**
+   * Converts the load start time string to a long value in microseconds.
+   * @param loadStartTime load start time string
+   * @return parsed time, or null if the string is empty or cannot be parsed
+   */
+  private Long getTimeStamp(String loadStartTime) {
+    if (loadStartTime.isEmpty()) {
+      return null;
+    }
+
+    Date dateToStr = null;
+    try {
+      dateToStr = parser.parse(loadStartTime);
+      return dateToStr.getTime() * 1000;
+    } catch (ParseException e) {
+      LOGGER.error("Cannot convert " + loadStartTime + " to Time/Long type value. " + e.getMessage());
+      return null;
+    }
+  }
+  /**
+   * @param loadStartTime
+   */
+  public void setLoadStartTime(String loadStartTime) {
+    this.loadStartTime = loadStartTime;
+  }
+
+  /**
+   * @return the mergedLoadName
+   */
+  public String getMergedLoadName() {
+    return mergedLoadName;
+  }
+
+  /**
+   * @param mergedLoadName the mergedLoadName to set
+   */
+  public void setMergedLoadName(String mergedLoadName) {
+    this.mergedLoadName = mergedLoadName;
+  }
+
+  /**
+   * @return the visibility
+   */
+  public String getVisibility() {
+    return visibility;
+  }
+
+  /**
+   * @param visibility the visibility to set
+   */
+  public void setVisibility(String visibility) {
+    this.visibility = visibility;
+  }
+
+  /**
+   * @return "true" if this is a major compacted segment
+   */
+  public String isMajorCompacted() {
+    return majorCompacted;
+  }
+
+  /**
+   * @param majorCompacted set to "true" if this is a major compacted segment
+   */
+  public void setMajorCompacted(String majorCompacted) {
+    this.majorCompacted = majorCompacted;
+  }
+}
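
One behavior of getTimeStamp above worth making explicit: the parsed Date's
getTime() (milliseconds) is multiplied by 1000, so the returned long is in
microseconds. A hedged sketch (not part of this patch); the time literal assumes
the CARBON_TIMESTAMP pattern:

    LoadMetadataDetails details = new LoadMetadataDetails();
    details.setLoadStartTime("15-08-2016 07:08:46"); // must match CarbonCommonConstants.CARBON_TIMESTAMP
    long startMicros = details.getLoadStartTimeAsLong(); // millis * 1000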

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/metadata/BlockletInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/BlockletInfo.java b/core/src/main/java/org/apache/carbondata/core/metadata/BlockletInfo.java
new file mode 100644
index 0000000..5cfa11b
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/BlockletInfo.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.metadata;
+
+public class BlockletInfo {
+  /**
+   * fileName.
+   */
+  private String fileName;
+
+  /**
+   * keyOffset.
+   */
+  private long keyOffset;
+
+  /**
+   * measureOffset.
+   */
+  private long[] measureOffset;
+
+  /**
+   * measureLength.
+   */
+  private int[] measureLength;
+
+  /**
+   * keyLength.
+   */
+  private int keyLength;
+
+  /**
+   * numberOfKeys.
+   */
+  private int numberOfKeys;
+
+  /**
+   * startKey.
+   */
+  private byte[] startKey;
+
+  /**
+   * endKey.
+   */
+  private byte[] endKey;
+
+  /**
+   * getFileName().
+   *
+   * @return String.
+   */
+  public String getFileName() {
+    return fileName;
+  }
+
+  /**
+   * setFileName.
+   */
+  public void setFileName(String fileName) {
+    this.fileName = fileName;
+  }
+
+  /**
+   * getKeyOffset.
+   *
+   * @return long.
+   */
+  public long getKeyOffset() {
+    return keyOffset;
+  }
+
+  /**
+   * setKeyOffset.
+   *
+   * @param keyOffset
+   */
+  public void setKeyOffset(long keyOffset) {
+    this.keyOffset = keyOffset;
+  }
+
+  /**
+   * getMeasureLength
+   *
+   * @return int[].
+   */
+  public int[] getMeasureLength() {
+    return measureLength;
+  }
+
+  /**
+   * setMeasureLength.
+   *
+   * @param measureLength
+   */
+  public void setMeasureLength(int[] measureLength) {
+    this.measureLength = measureLength;
+  }
+
+  /**
+   * getKeyLength.
+   *
+   * @return
+   */
+  public int getKeyLength() {
+    return keyLength;
+  }
+
+  /**
+   * setKeyLength.
+   */
+  public void setKeyLength(int keyLength) {
+    this.keyLength = keyLength;
+  }
+
+  /**
+   * getMeasureOffset.
+   *
+   * @return long[].
+   */
+  public long[] getMeasureOffset() {
+    return measureOffset;
+  }
+
+  /**
+   * setMeasureOffset.
+   *
+   * @param measureOffset
+   */
+  public void setMeasureOffset(long[] measureOffset) {
+    this.measureOffset = measureOffset;
+  }
+
+  /**
+   * getNumberOfKeys()
+   *
+   * @return int.
+   */
+  public int getNumberOfKeys() {
+    return numberOfKeys;
+  }
+
+  /**
+   * setNumberOfKeys.
+   *
+   * @param numberOfKeys
+   */
+  public void setNumberOfKeys(int numberOfKeys) {
+    this.numberOfKeys = numberOfKeys;
+  }
+
+  /**
+   * getStartKey().
+   *
+   * @return byte[].
+   */
+  public byte[] getStartKey() {
+    return startKey;
+  }
+
+  /**
+   * setStartKey.
+   *
+   * @param startKey
+   */
+  public void setStartKey(byte[] startKey) {
+    this.startKey = startKey;
+  }
+
+  /**
+   * getEndKey().
+   *
+   * @return byte[].
+   */
+  public byte[] getEndKey() {
+    return endKey;
+  }
+
+  /**
+   * setEndKey.
+   *
+   * @param endKey
+   */
+  public void setEndKey(byte[] endKey) {
+    this.endKey = endKey;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/metadata/BlockletInfoColumnar.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/BlockletInfoColumnar.java b/core/src/main/java/org/apache/carbondata/core/metadata/BlockletInfoColumnar.java
new file mode 100644
index 0000000..54e5361
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/BlockletInfoColumnar.java
@@ -0,0 +1,405 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.metadata;
+
+import java.util.BitSet;
+
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.keygenerator.mdkey.NumberCompressor;
+
+public class BlockletInfoColumnar {
+  /**
+   * fileName.
+   */
+  private String fileName;
+
+  /**
+   * measureOffset.
+   */
+  private long[] measureOffset;
+
+  /**
+   * measureLength.
+   */
+  private int[] measureLength;
+
+  /**
+   * numberOfKeys.
+   */
+  private int numberOfKeys;
+
+  /**
+   * startKey.
+   */
+  private byte[] startKey;
+
+  /**
+   * endKey.
+   */
+  private byte[] endKey;
+
+  /**
+   * keyOffSets
+   */
+  private long[] keyOffSets;
+
+  /**
+   * keyLengths
+   */
+  private int[] keyLengths;
+
+  /**
+   * isSortedKeyColumn
+   */
+  private boolean[] isSortedKeyColumn;
+
+  /**
+   * keyBlockIndexOffSets
+   */
+  private long[] keyBlockIndexOffSets;
+
+  /**
+   * keyBlockIndexLength
+   */
+  private int[] keyBlockIndexLength;
+
+  /**
+   * dataIndexMap
+   */
+  private int[] dataIndexMapLength;
+
+  /**
+   * dataIndexMap
+   */
+  private long[] dataIndexMapOffsets;
+
+  private boolean[] aggKeyBlock;
+  /**
+   * blockletMetaSize
+   */
+  private int blockletMetaSize;
+
+  private NumberCompressor[] keyBlockUnCompressor;
+
+  private ValueCompressionModel compressionModel;
+
+  /**
+   * column max array
+   */
+  private byte[][] columnMaxData;
+
+  /**
+   * column min array
+   */
+  private byte[][] columnMinData;
+
+  /**
+   * true if the block at the given index is a column group block
+   */
+  private boolean[] colGrpBlock;
+
+  /**
+   * bit sets holding, per measure, the row indexes whose values are null
+   */
+  private BitSet[] measureNullValueIndex;
+
+  /**
+   * getFileName().
+   *
+   * @return String.
+   */
+  public String getFileName() {
+    return fileName;
+  }
+
+  /**
+   * setFileName.
+   */
+  public void setFileName(String fileName) {
+    this.fileName = fileName;
+  }
+
+  /**
+   * getMeasureLength
+   *
+   * @return int[].
+   */
+  public int[] getMeasureLength() {
+    return measureLength;
+  }
+
+  /**
+   * setMeasureLength.
+   *
+   * @param measureLength
+   */
+  public void setMeasureLength(int[] measureLength) {
+    this.measureLength = measureLength;
+  }
+
+  /**
+   * getMeasureOffset.
+   *
+   * @return long[].
+   */
+  public long[] getMeasureOffset() {
+    return measureOffset;
+  }
+
+  /**
+   * setMeasureOffset.
+   *
+   * @param measureOffset
+   */
+  public void setMeasureOffset(long[] measureOffset) {
+    this.measureOffset = measureOffset;
+  }
+
+  /**
+   * getStartKey().
+   *
+   * @return byte[].
+   */
+  public byte[] getStartKey() {
+    return startKey;
+  }
+
+  /**
+   * setStartKey.
+   *
+   * @param startKey
+   */
+  public void setStartKey(byte[] startKey) {
+    this.startKey = startKey;
+  }
+
+  /**
+   * getEndKey().
+   *
+   * @return byte[].
+   */
+  public byte[] getEndKey() {
+    return endKey;
+  }
+
+  /**
+   * setEndKey.
+   *
+   * @param endKey
+   */
+  public void setEndKey(byte[] endKey) {
+    this.endKey = endKey;
+  }
+
+  /**
+   * @return the keyOffSets
+   */
+  public long[] getKeyOffSets() {
+    return keyOffSets;
+  }
+
+  /**
+   * @param keyOffSets the keyOffSets to set
+   */
+  public void setKeyOffSets(long[] keyOffSets) {
+    this.keyOffSets = keyOffSets;
+  }
+
+  /**
+   * @return the keyLengths
+   */
+  public int[] getKeyLengths() {
+    return keyLengths;
+  }
+
+  //TODO SIMIAN
+
+  /**
+   * @param keyLengths the keyLengths to set
+   */
+  public void setKeyLengths(int[] keyLengths) {
+    this.keyLengths = keyLengths;
+  }
+
+  /**
+   * getNumberOfKeys()
+   *
+   * @return int.
+   */
+  public int getNumberOfKeys() {
+    return numberOfKeys;
+  }
+
+  /**
+   * setNumberOfKeys.
+   *
+   * @param numberOfKeys
+   */
+  public void setNumberOfKeys(int numberOfKeys) {
+    this.numberOfKeys = numberOfKeys;
+  }
+
+  /**
+   * @return the isSortedKeyColumn
+   */
+  public boolean[] getIsSortedKeyColumn() {
+    return isSortedKeyColumn;
+  }
+
+  /**
+   * @param isSortedKeyColumn the isSortedKeyColumn to set
+   */
+  public void setIsSortedKeyColumn(boolean[] isSortedKeyColumn) {
+    this.isSortedKeyColumn = isSortedKeyColumn;
+  }
+
+  /**
+   * @return the keyBlockIndexOffSets
+   */
+  public long[] getKeyBlockIndexOffSets() {
+    return keyBlockIndexOffSets;
+  }
+
+  /**
+   * @param keyBlockIndexOffSets the keyBlockIndexOffSets to set
+   */
+  public void setKeyBlockIndexOffSets(long[] keyBlockIndexOffSets) {
+    this.keyBlockIndexOffSets = keyBlockIndexOffSets;
+  }
+
+  /**
+   * @return the keyBlockIndexLength
+   */
+  public int[] getKeyBlockIndexLength() {
+    return keyBlockIndexLength;
+  }
+
+  /**
+   * @param keyBlockIndexLength the keyBlockIndexLength to set
+   */
+  public void setKeyBlockIndexLength(int[] keyBlockIndexLength) {
+    this.keyBlockIndexLength = keyBlockIndexLength;
+  }
+
+  /**
+   * @return the blockletMetaSize
+   */
+  public int getBlockletMetaSize() {
+    return blockletMetaSize;
+  }
+
+  /**
+   * @param blockletMetaSize the blockletMetaSize to set
+   */
+  public void setBlockletMetaSize(int blockletMetaSize) {
+    this.blockletMetaSize = blockletMetaSize;
+  }
+
+  /**
+   * @return the dataIndexMapLength
+   */
+  public int[] getDataIndexMapLength() {
+    return dataIndexMapLength;
+  }
+
+  public void setDataIndexMapLength(int[] dataIndexMapLength) {
+    this.dataIndexMapLength = dataIndexMapLength;
+  }
+
+  /**
+   * @return the dataIndexMapOffsets
+   */
+  public long[] getDataIndexMapOffsets() {
+    return dataIndexMapOffsets;
+  }
+
+  public void setDataIndexMapOffsets(long[] dataIndexMapOffsets) {
+    this.dataIndexMapOffsets = dataIndexMapOffsets;
+  }
+
+  public boolean[] getAggKeyBlock() {
+    return aggKeyBlock;
+  }
+
+  public void setAggKeyBlock(boolean[] aggKeyBlock) {
+    this.aggKeyBlock = aggKeyBlock;
+  }
+
+  public NumberCompressor[] getKeyBlockUnCompressor() {
+    return keyBlockUnCompressor;
+  }
+
+  public void setKeyBlockUnCompressor(NumberCompressor[] keyBlockUnCompressor) {
+    this.keyBlockUnCompressor = keyBlockUnCompressor;
+  }
+
+  public byte[][] getColumnMaxData() {
+    return this.columnMaxData;
+  }
+
+  public void setColumnMaxData(byte[][] columnMaxData) {
+    this.columnMaxData = columnMaxData;
+  }
+
+  public byte[][] getColumnMinData() {
+    return this.columnMinData;
+  }
+
+  public void setColumnMinData(byte[][] columnMinData) {
+    this.columnMinData = columnMinData;
+  }
+
+  public ValueCompressionModel getCompressionModel() {
+    return compressionModel;
+  }
+
+  public void setCompressionModel(ValueCompressionModel compressionModel) {
+    this.compressionModel = compressionModel;
+  }
+
+  /**
+   * @return
+   */
+  public boolean[] getColGrpBlocks() {
+    return this.colGrpBlock;
+  }
+
+  /**
+   * @param colGrpBlock
+   */
+  public void setColGrpBlocks(boolean[] colGrpBlock) {
+    this.colGrpBlock = colGrpBlock;
+  }
+
+  /**
+   * @return the measureNullValueIndex
+   */
+  public BitSet[] getMeasureNullValueIndex() {
+    return measureNullValueIndex;
+  }
+
+  /**
+   * @param measureNullValueIndex the measureNullValueIndex to set
+   */
+  public void setMeasureNullValueIndex(BitSet[] measureNullValueIndex) {
+    this.measureNullValueIndex = measureNullValueIndex;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/metadata/ValueEncoderMeta.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/ValueEncoderMeta.java b/core/src/main/java/org/apache/carbondata/core/metadata/ValueEncoderMeta.java
new file mode 100644
index 0000000..471158d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/ValueEncoderMeta.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.metadata;
+
+import java.io.Serializable;
+
+/**
+ * It holds Value compression metadata for one data column
+ */
+public class ValueEncoderMeta implements Serializable {
+
+  /**
+   * maxValue
+   */
+  private Object maxValue;
+  /**
+   * minValue.
+   */
+  private Object minValue;
+
+  /**
+   * uniqueValue
+   */
+  private Object uniqueValue;
+  /**
+   * decimal.
+   */
+  private int decimal;
+
+  /**
+   * aggType
+   */
+  private char type;
+
+  /**
+   * dataTypeSelected
+   */
+  private byte dataTypeSelected;
+
+  public Object getMaxValue() {
+    return maxValue;
+  }
+
+  public void setMaxValue(Object maxValue) {
+    this.maxValue = maxValue;
+  }
+
+  public Object getMinValue() {
+    return minValue;
+  }
+
+  public void setMinValue(Object minValue) {
+    this.minValue = minValue;
+  }
+
+  public Object getUniqueValue() {
+    return uniqueValue;
+  }
+
+  public void setUniqueValue(Object uniqueValue) {
+    this.uniqueValue = uniqueValue;
+  }
+
+  public int getDecimal() {
+    return decimal;
+  }
+
+  public void setDecimal(int decimal) {
+    this.decimal = decimal;
+  }
+
+  public char getType() {
+    return type;
+  }
+
+  public void setType(char type) {
+    this.type = type;
+  }
+
+  public byte getDataTypeSelected() {
+    return dataTypeSelected;
+  }
+
+  public void setDataTypeSelected(byte dataTypeSelected) {
+    this.dataTypeSelected = dataTypeSelected;
+  }
+}
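
For orientation, a minimal sketch (not part of this patch) of how a ValueEncoderMeta
instance might be populated for a double measure column; the literal values and the
'n' type code are illustrative assumptions, not taken from this commit:

    import org.apache.carbondata.core.metadata.ValueEncoderMeta;

    public class ValueEncoderMetaSketch {
      public static void main(String[] args) {
        ValueEncoderMeta meta = new ValueEncoderMeta();
        meta.setMaxValue(4521.75d);        // max value seen for the column (illustrative)
        meta.setMinValue(12.5d);           // min value seen for the column (illustrative)
        meta.setUniqueValue(11.5d);        // assumed null-marker value below the min
        meta.setDecimal(2);                // max decimal places observed (illustrative)
        meta.setType('n');                 // aggregator type code; 'n' is assumed here
        meta.setDataTypeSelected((byte) 0);
        System.out.println("range: " + meta.getMinValue() + " .. " + meta.getMaxValue());
      }
    }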

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryColumnMetaChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryColumnMetaChunk.java b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryColumnMetaChunk.java
new file mode 100644
index 0000000..3c4658e
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryColumnMetaChunk.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.reader;
+
+/**
+ * A wrapper class for the thrift class ColumnDictionaryChunkMeta which
+ * contains data such as min and max surrogate key, start and end offset, and chunk count
+ */
+public class CarbonDictionaryColumnMetaChunk {
+
+  /**
+   * Minimum value surrogate key for a segment
+   */
+  private int min_surrogate_key;
+
+  /**
+   * Max value of surrogate key for a segment
+   */
+  private int max_surrogate_key;
+
+  /**
+   * start offset of dictionary chunk in dictionary file for a segment
+   */
+  private long start_offset;
+
+  /**
+   * end offset of dictionary chunk in dictionary file for a segment
+   */
+  private long end_offset;
+
+  /**
+   * count of dictionary chunks for a segment
+   */
+  private int chunk_count;
+
+  /**
+   * constructor
+   *
+   * @param min_surrogate_key Minimum value surrogate key for a segment
+   * @param max_surrogate_key Maximum value surrogate key for a segment
+   * @param start_offset      start offset of dictionary chunk in dictionary file for a segment
+   * @param end_offset        end offset of dictionary chunk in dictionary file for a segment
+   * @param chunk_count       count of dictionary chunks for a segment
+   */
+  public CarbonDictionaryColumnMetaChunk(int min_surrogate_key, int max_surrogate_key,
+      long start_offset, long end_offset, int chunk_count) {
+    this.min_surrogate_key = min_surrogate_key;
+    this.max_surrogate_key = max_surrogate_key;
+    this.start_offset = start_offset;
+    this.end_offset = end_offset;
+    this.chunk_count = chunk_count;
+  }
+
+  /**
+   * @return min surrogate key
+   */
+  public int getMin_surrogate_key() {
+    return min_surrogate_key;
+  }
+
+  /**
+   * @return max surrogate key
+   */
+  public int getMax_surrogate_key() {
+    return max_surrogate_key;
+  }
+
+  /**
+   * @return start offset
+   */
+  public long getStart_offset() {
+    return start_offset;
+  }
+
+  /**
+   * @return end offset
+   */
+  public long getEnd_offset() {
+    return end_offset;
+  }
+
+  /**
+   * @return chunk count
+   */
+  public int getChunk_count() {
+    return chunk_count;
+  }
+}
+
+
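
A short usage sketch for the class above (values illustrative, not from this commit):
one meta chunk entry describing the dictionary written for a single segment:

    import org.apache.carbondata.core.reader.CarbonDictionaryColumnMetaChunk;

    public class MetaChunkSketch {
      public static void main(String[] args) {
        // surrogate keys 1..5000 written between offsets 0 and 18432, in 3 chunks (illustrative)
        CarbonDictionaryColumnMetaChunk chunk =
            new CarbonDictionaryColumnMetaChunk(1, 5000, 0L, 18432L, 3);
        long bytes = chunk.getEnd_offset() - chunk.getStart_offset();
        System.out.println(chunk.getChunk_count() + " chunks spanning " + bytes + " bytes");
      }
    }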

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryMetadataReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryMetadataReader.java b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryMetadataReader.java
new file mode 100644
index 0000000..09a85d3
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/reader/CarbonDictionaryMetadataReader.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.reader;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * dictionary metadata reader interface which declares methods to read dictionary metadata
+ */
+public interface CarbonDictionaryMetadataReader extends Closeable {
+
+  /**
+   * This method will be used to read the complete metadata file.
+   * Applicable scenarios:
+   * 1. Query execution. Whenever a query is executed, the dictionary metadata has
+   * to be read first in order to read the dictionary file and define the query scope.
+   * 2. If the dictionary file is read using a start and end offset, this meta list
+   * can be used to count the dictionary chunks present between the two offsets
+   *
+   * @return list of all dictionary meta chunks which contains information for each segment
+   * @throws IOException if an I/O error occurs
+   */
+  List<CarbonDictionaryColumnMetaChunk> read() throws IOException;
+
+  /**
+   * This method will be used to read only the last entry of dictionary meta chunk.
+   * Applicable scenarios:
+   * 1. Global dictionary generation for an incremental load. In this case only the
+   * last dictionary chunk meta entry has to be read to calculate the min and max
+   * surrogate keys and the start and end offset for the new dictionary chunk.
+   * 2. Truncate operation. While writing the dictionary file during an incremental
+   * load, the file needs to be validated for any inconsistency; here the end offset
+   * of the last dictionary chunk meta is validated against the file size.
+   *
+   * @return last segment entry for dictionary chunk
+   * @throws IOException if an I/O error occurs
+   */
+  CarbonDictionaryColumnMetaChunk readLastEntryOfDictionaryMetaChunk() throws IOException;
+}
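
A hedged caller sketch for the interface above, e.g. for the incremental-load scenario
described in the javadoc; how the reader instance is obtained is left abstract, since no
implementation appears in this hunk:

    import java.io.IOException;

    import org.apache.carbondata.core.reader.CarbonDictionaryColumnMetaChunk;
    import org.apache.carbondata.core.reader.CarbonDictionaryMetadataReader;

    public class DictionaryMetaScan {
      // computes where the next dictionary chunk should be appended (sketch)
      static long nextWriteOffset(CarbonDictionaryMetadataReader reader) throws IOException {
        try {
          CarbonDictionaryColumnMetaChunk last = reader.readLastEntryOfDictionaryMetaChunk();
          return last.getEnd_offset();
        } finally {
          reader.close(); // the interface extends Closeable
        }
      }
    }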


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/executor/impl/QueryExecutorProperties.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/executor/impl/QueryExecutorProperties.java b/core/src/main/java/org/apache/carbondata/scan/executor/impl/QueryExecutorProperties.java
new file mode 100644
index 0000000..2f21a96
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/executor/impl/QueryExecutorProperties.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.executor.impl;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.carbon.datastore.block.AbstractIndex;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.carbon.querystatistics.QueryStatisticsRecorder;
+import org.apache.carbondata.scan.executor.infos.KeyStructureInfo;
+import org.apache.carbondata.scan.filter.GenericQueryType;
+
+/**
+ * Holds all the properties required for query execution
+ */
+public class QueryExecutorProperties {
+
+  /**
+   * holds the information required for updating the older block
+   * dictionary key
+   */
+  public KeyStructureInfo keyStructureInfo;
+  /**
+   * as we have multiple types of column aggregation (dimension,
+   * expression, measure), this will be used to get the
+   * measure aggregation start index
+   */
+  public int measureStartIndex;
+  /**
+   * queries like count(1), count(*), etc. will use this parameter
+   */
+  public boolean isFunctionQuery;
+  /**
+   * aggExpressionStartIndex
+   */
+  public int aggExpressionStartIndex;
+  /**
+   * indexes of the dimensions which are present in the order by
+   * clause of a query
+   */
+  public byte[] sortDimIndexes;
+
+  /**
+   * this will hold the mapping of each dictionary dimension column
+   * to its dictionary
+   */
+  public Map<String, Dictionary> columnToDictionayMapping;
+
+  /**
+   * Measure datatypes
+   */
+  public DataType[] measureDataTypes;
+  /**
+   * complex parent index to query mapping
+   */
+  public Map<Integer, GenericQueryType> complexDimensionInfoMap;
+  /**
+   * all the complex dimensions which are part of the filter
+   */
+  public Set<CarbonDimension> complexFilterDimension;
+  /**
+   * to record the query execution details phase wise
+   */
+  public QueryStatisticsRecorder queryStatisticsRecorder;
+  /**
+   * list of blocks in which query will be executed
+   */
+  protected List<AbstractIndex> dataBlocks;
+
+}
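
All members above are plain public holders, so an executor is expected to fill them
directly; a trimmed sketch with placeholder values (not from this commit):

    import java.util.HashMap;

    import org.apache.carbondata.scan.executor.impl.QueryExecutorProperties;

    public class PropertiesSetupSketch {
      static QueryExecutorProperties minimalProperties() {
        QueryExecutorProperties props = new QueryExecutorProperties();
        props.isFunctionQuery = false;     // not a count(*)-style query
        props.measureStartIndex = 0;       // measures start at slot 0 of the aggregator array
        props.aggExpressionStartIndex = 0;
        props.columnToDictionayMapping = new HashMap<>(); // filled per dictionary column id
        return props;
      }
    }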

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/executor/infos/AggregatorInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/executor/infos/AggregatorInfo.java b/core/src/main/java/org/apache/carbondata/scan/executor/infos/AggregatorInfo.java
new file mode 100644
index 0000000..3a2c356
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/executor/infos/AggregatorInfo.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.executor.infos;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+
+/**
+ * Info class which store all the details
+ * which is required during aggregation
+ */
+public class AggregatorInfo {
+
+  /**
+   * selected query measure ordinal
+   * which will be used to read the measures chunk data
+   * this will be storing the index of the measure in measures chunk
+   */
+  private int[] measureOrdinals;
+
+  /**
+   * This parameter will be used to
+   * check whether particular measure is present
+   * in the table block, if not then its default value will be used
+   */
+  private boolean[] measureExists;
+
+  /**
+   * this default value will be used to when some measure is not present
+   * in the table block, in case of restructuring of the table if user is adding any
+   * measure then in older block that measure wont be present so for measure default value
+   * will be used to aggregate in the older table block query execution
+   */
+  private Object[] defaultValues;
+
+  /**
+   * In carbon there are three type of aggregation
+   * (dimension aggregation, expression aggregation and measure aggregation)
+   * Below index will be used to set the start position of expression in measures
+   * aggregator array
+   */
+  private int expressionAggregatorStartIndex;
+
+  /**
+   * In carbon there are three type of aggregation
+   * (dimension aggregation, expression aggregation and measure aggregation)
+   * Below index will be used to set the start position of measures in measures
+   * aggregator array
+   */
+  private int measureAggregatorStartIndex;
+
+  /**
+   * Datatype of each measure
+   */
+  private DataType[] measureDataTypes;
+
+  /**
+   * @return the measureOrdinal
+   */
+  public int[] getMeasureOrdinals() {
+    return measureOrdinals;
+  }
+
+  /**
+   * @param measureOrdinal the measureOrdinal to set
+   */
+  public void setMeasureOrdinals(int[] measureOrdinal) {
+    this.measureOrdinals = measureOrdinal;
+  }
+
+  /**
+   * @return the measureExists
+   */
+  public boolean[] getMeasureExists() {
+    return measureExists;
+  }
+
+  /**
+   * @param measureExists the measureExists to set
+   */
+  public void setMeasureExists(boolean[] measureExists) {
+    this.measureExists = measureExists;
+  }
+
+  /**
+   * @return the defaultValues
+   */
+  public Object[] getDefaultValues() {
+    return defaultValues;
+  }
+
+  /**
+   * @param defaultValues the defaultValues to set
+   */
+  public void setDefaultValues(Object[] defaultValues) {
+    this.defaultValues = defaultValues;
+  }
+
+  /**
+   * @return the expressionAggregatorStartIndex
+   */
+  public int getExpressionAggregatorStartIndex() {
+    return expressionAggregatorStartIndex;
+  }
+
+  /**
+   * @param expressionAggregatorStartIndex the expressionAggregatorStartIndex to set
+   */
+  public void setExpressionAggregatorStartIndex(int expressionAggregatorStartIndex) {
+    this.expressionAggregatorStartIndex = expressionAggregatorStartIndex;
+  }
+
+  /**
+   * @return the measureAggregatorStartIndex
+   */
+  public int getMeasureAggregatorStartIndex() {
+    return measureAggregatorStartIndex;
+  }
+
+  /**
+   * @param measureAggregatorStartIndex the measureAggregatorStartIndex to set
+   */
+  public void setMeasureAggregatorStartIndex(int measureAggregatorStartIndex) {
+    this.measureAggregatorStartIndex = measureAggregatorStartIndex;
+  }
+
+  public DataType[] getMeasureDataTypes() {
+    return measureDataTypes;
+  }
+
+  public void setMeasureDataTypes(DataType[] measureDataTypes) {
+    this.measureDataTypes = measureDataTypes;
+  }
+}
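
A sketch of how these fields fit together for a restructured block where the second
queried measure is absent from an older block (all values illustrative):

    import org.apache.carbondata.scan.executor.infos.AggregatorInfo;

    public class AggregatorInfoSketch {
      static AggregatorInfo forRestructuredBlock() {
        AggregatorInfo info = new AggregatorInfo();
        info.setMeasureOrdinals(new int[] { 0, 1 });
        // the second measure was added after this block was written, so it is missing here
        info.setMeasureExists(new boolean[] { true, false });
        // its default value is aggregated in place of the missing column
        info.setDefaultValues(new Object[] { null, 0.0d });
        info.setExpressionAggregatorStartIndex(2);
        info.setMeasureAggregatorStartIndex(0);
        return info;
      }
    }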

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/executor/infos/BlockExecutionInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/executor/infos/BlockExecutionInfo.java b/core/src/main/java/org/apache/carbondata/scan/executor/infos/BlockExecutionInfo.java
new file mode 100644
index 0000000..ca5c2e0
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/executor/infos/BlockExecutionInfo.java
@@ -0,0 +1,681 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.executor.infos;
+
+import java.util.Map;
+
+import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.carbon.datastore.DataRefNode;
+import org.apache.carbondata.core.carbon.datastore.IndexKey;
+import org.apache.carbondata.core.carbon.datastore.block.AbstractIndex;
+import org.apache.carbondata.core.carbon.querystatistics.QueryStatisticsRecorder;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory.FileType;
+import org.apache.carbondata.core.keygenerator.KeyGenerator;
+import org.apache.carbondata.scan.filter.GenericQueryType;
+import org.apache.carbondata.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.scan.model.QueryDimension;
+import org.apache.carbondata.scan.model.QueryMeasure;
+
+/**
+ * Below class will have all the properties which needed during query execution
+ * for one block
+ */
+public class BlockExecutionInfo {
+
+  /**
+   * block on which query will be executed
+   */
+  private AbstractIndex blockIndex;
+
+  /**
+   * each segment's key size can be different, and in that case we need to update
+   * the fixed key with the latest segment key generator. This property will
+   * tell whether this is required or not; if the key size is the same, it is
+   * not required
+   */
+  private boolean isFixedKeyUpdateRequired;
+
+  /**
+   * in case of a detail + order by query, when the number of output records is
+   * large we need to store the data on disk, so this check will be used to decide
+   * whether we can write to the disk or not
+   */
+  private boolean isFileBasedQuery;
+
+  /**
+   * id of the query. this will be used to create directory while writing the
+   * data file in case of detail+order by query
+   */
+  private String queryId;
+
+  /**
+   * this is to handle the limit query; in case of a detail query we push down
+   * the limit to the executor level, so based on the limit value we can
+   * process only that many records
+   */
+  private int limit;
+
+  /**
+   * below to store all the information required for aggregation during query
+   * execution
+   */
+  private AggregatorInfo aggregatorInfo;
+
+  /**
+   * this will be used to get the first tentative block from which query
+   * execution starts; this will be useful in case of a filter query to get the
+   * start block based on filter values
+   */
+  private IndexKey startKey;
+
+  /**
+   * this will be used to get the last tentative block till which scanning
+   * will be done; this will be useful in case of a filter query to get the last
+   * block based on filter values
+   */
+  private IndexKey endKey;
+
+  /**
+   * masked byte for block which will be used to unpack the fixed length key,
+   * this will be used for updating the older block key with new block key
+   * generator
+   */
+  private int[] maskedByteForBlock;
+
+  /**
+   * flag to check whether query is detail query or aggregation query
+   */
+  private boolean isDetailQuery;
+
+  /**
+   * total number of dimension in block
+   */
+  private int totalNumberDimensionBlock;
+
+  /**
+   * total number of measure in block
+   */
+  private int totalNumberOfMeasureBlock;
+
+  /**
+   * will be used to read the dimension block from file
+   */
+  private int[] allSelectedDimensionBlocksIndexes;
+
+  /**
+   * will be used to read the measure block from file
+   */
+  private int[] allSelectedMeasureBlocksIndexes;
+
+  /**
+   * this will be used to update the older block fixed length keys with the
+   * new block fixed length key
+   */
+  private KeyStructureInfo keyStructureInfo;
+
+  /**
+   * below will be used to sort the data based on the order by information
+   */
+  private SortInfo sortInfo;
+
+  /**
+   * first block from which query execution will start
+   */
+  private DataRefNode firstDataBlock;
+
+  /**
+   * number of block to be scanned in the query
+   */
+  private long numberOfBlockToScan;
+
+  /**
+   * key size of the fixed length dimension column
+   */
+  private int fixedLengthKeySize;
+
+  /**
+   * dictionary column block indexes based on query
+   */
+  private int[] dictionaryColumnBlockIndex;
+  /**
+   * no dictionary column block indexes based on the query order
+   */
+  private int[] noDictionaryBlockIndexes;
+
+  /**
+   * key generator used for generating the table block fixed length key
+   */
+  private KeyGenerator blockKeyGenerator;
+
+  /**
+   * each column value size
+   */
+  private int[] eachColumnValueSize;
+
+  /**
+   * partition number
+   */
+  private String partitionId;
+
+  /**
+   * column group block index in file to key structure info mapping
+   */
+  private Map<Integer, KeyStructureInfo> columnGroupToKeyStructureInfo;
+
+  /**
+   * mapping of a dictionary dimension to its dictionary, which will be
+   * used to get the actual data from the dictionary for aggregation and sorting
+   */
+  private Map<String, Dictionary> columnIdToDcitionaryMapping;
+
+  /**
+   * filter tree to execute the filter
+   */
+  private FilterExecuter filterExecuterTree;
+
+  /**
+   * fileType
+   */
+  private FileType fileType;
+
+  /**
+   * whether it needs only raw byte records without aggregation.
+   */
+  private boolean isRawRecordDetailQuery;
+
+  /**
+   * complexParentIndexToQueryMap
+   */
+  private Map<Integer, GenericQueryType> complexParentIndexToQueryMap;
+
+  /**
+   * complex dimension parent block indexes
+   */
+  private int[] complexColumnParentBlockIndexes;
+
+  /**
+   * to record the statistics
+   */
+  private QueryStatisticsRecorder statisticsRecorder;
+
+  /**
+   * @return the tableBlock
+   */
+  public AbstractIndex getDataBlock() {
+    return blockIndex;
+  }
+
+  /**
+   * list of dimensions selected in the query
+   */
+  private QueryDimension[] queryDimensions;
+
+  /**
+   * list of measures selected in the query
+   */
+  private QueryMeasure[] queryMeasures;
+
+  /**
+   * @param blockIndex the tableBlock to set
+   */
+  public void setDataBlock(AbstractIndex blockIndex) {
+    this.blockIndex = blockIndex;
+  }
+
+  /**
+   * @return the isFixedKeyUpdateRequired
+   */
+  public boolean isFixedKeyUpdateRequired() {
+    return isFixedKeyUpdateRequired;
+  }
+
+  /**
+   * @param isFixedKeyUpdateRequired the isFixedKeyUpdateRequired to set
+   */
+  public void setFixedKeyUpdateRequired(boolean isFixedKeyUpdateRequired) {
+    this.isFixedKeyUpdateRequired = isFixedKeyUpdateRequired;
+  }
+
+  /**
+   * @return the isFileBasedQuery
+   */
+  public boolean isFileBasedQuery() {
+    return isFileBasedQuery;
+  }
+
+  /**
+   * @param isFileBasedQuery the isFileBasedQuery to set
+   */
+  public void setFileBasedQuery(boolean isFileBasedQuery) {
+    this.isFileBasedQuery = isFileBasedQuery;
+  }
+
+  /**
+   * @return the queryId
+   */
+  public String getQueryId() {
+    return queryId;
+  }
+
+  /**
+   * @param queryId the queryId to set
+   */
+  public void setQueryId(String queryId) {
+    this.queryId = queryId;
+  }
+
+  /**
+   * @return the limit
+   */
+  public int getLimit() {
+    return limit;
+  }
+
+  /**
+   * @param limit the limit to set
+   */
+  public void setLimit(int limit) {
+    this.limit = limit;
+  }
+
+  /**
+   * @return the aggregatorInfos
+   */
+  public AggregatorInfo getAggregatorInfo() {
+    return aggregatorInfo;
+  }
+
+  /**
+   * @param aggregatorInfo the aggregatorInfos to set
+   */
+  public void setAggregatorInfo(AggregatorInfo aggregatorInfo) {
+    this.aggregatorInfo = aggregatorInfo;
+  }
+
+  /**
+   * @return the startKey
+   */
+  public IndexKey getStartKey() {
+    return startKey;
+  }
+
+  /**
+   * @param startKey the startKey to set
+   */
+  public void setStartKey(IndexKey startKey) {
+    this.startKey = startKey;
+  }
+
+  /**
+   * @return the endKey
+   */
+  public IndexKey getEndKey() {
+    return endKey;
+  }
+
+  /**
+   * @param endKey the endKey to set
+   */
+  public void setEndKey(IndexKey endKey) {
+    this.endKey = endKey;
+  }
+
+  /**
+   * @return the maskedByteForBlock
+   */
+  public int[] getMaskedByteForBlock() {
+    return maskedByteForBlock;
+  }
+
+
+
+  /**
+   * @param maskedByteForBlock the maskedByteForBlock to set
+   */
+  public void setMaskedByteForBlock(int[] maskedByteForBlock) {
+    this.maskedByteForBlock = maskedByteForBlock;
+  }
+
+  /**
+   * @return the isDetailQuery
+   */
+  public boolean isDetailQuery() {
+    return isDetailQuery;
+  }
+
+  /**
+   * @param isDetailQuery the isDetailQuery to set
+   */
+  public void setDetailQuery(boolean isDetailQuery) {
+    this.isDetailQuery = isDetailQuery;
+  }
+
+  /**
+   * @return the totalNumberDimensionBlock
+   */
+  public int getTotalNumberDimensionBlock() {
+    return totalNumberDimensionBlock;
+  }
+
+  /**
+   * @param totalNumberDimensionBlock the totalNumberDimensionBlock to set
+   */
+  public void setTotalNumberDimensionBlock(int totalNumberDimensionBlock) {
+    this.totalNumberDimensionBlock = totalNumberDimensionBlock;
+  }
+
+  /**
+   * @return the totalNumberOfMeasureBlock
+   */
+  public int getTotalNumberOfMeasureBlock() {
+    return totalNumberOfMeasureBlock;
+  }
+
+  /**
+   * @param totalNumberOfMeasureBlock the totalNumberOfMeasureBlock to set
+   */
+  public void setTotalNumberOfMeasureBlock(int totalNumberOfMeasureBlock) {
+    this.totalNumberOfMeasureBlock = totalNumberOfMeasureBlock;
+  }
+
+  /**
+   * @return the allSelectedDimensionBlocksIndexes
+   */
+  public int[] getAllSelectedDimensionBlocksIndexes() {
+    return allSelectedDimensionBlocksIndexes;
+  }
+
+  /**
+   * @param allSelectedDimensionBlocksIndexes the allSelectedDimensionBlocksIndexes to set
+   */
+  public void setAllSelectedDimensionBlocksIndexes(int[] allSelectedDimensionBlocksIndexes) {
+    this.allSelectedDimensionBlocksIndexes = allSelectedDimensionBlocksIndexes;
+  }
+
+  /**
+   * @return the allSelectedMeasureBlocksIndexes
+   */
+  public int[] getAllSelectedMeasureBlocksIndexes() {
+    return allSelectedMeasureBlocksIndexes;
+  }
+
+  /**
+   * @param allSelectedMeasureBlocksIndexes the allSelectedMeasureBlocksIndexes to set
+   */
+  public void setAllSelectedMeasureBlocksIndexes(int[] allSelectedMeasureBlocksIndexes) {
+    this.allSelectedMeasureBlocksIndexes = allSelectedMeasureBlocksIndexes;
+  }
+
+  /**
+   * @return the restructureInfos
+   */
+  public KeyStructureInfo getKeyStructureInfo() {
+    return keyStructureInfo;
+  }
+
+  /**
+   * @param keyStructureInfo the restructureInfos to set
+   */
+  public void setKeyStructureInfo(KeyStructureInfo keyStructureInfo) {
+    this.keyStructureInfo = keyStructureInfo;
+  }
+
+  /**
+   * @return the sortInfos
+   */
+  public SortInfo getSortInfo() {
+    return sortInfo;
+  }
+
+  /**
+   * @param sortInfo the sortInfos to set
+   */
+  public void setSortInfo(SortInfo sortInfo) {
+    this.sortInfo = sortInfo;
+  }
+
+  /**
+   * @return the firstDataBlock
+   */
+  public DataRefNode getFirstDataBlock() {
+    return firstDataBlock;
+  }
+
+  /**
+   * @param firstDataBlock the firstDataBlock to set
+   */
+  public void setFirstDataBlock(DataRefNode firstDataBlock) {
+    this.firstDataBlock = firstDataBlock;
+  }
+
+  /**
+   * @return the numberOfBlockToScan
+   */
+  public long getNumberOfBlockToScan() {
+    return numberOfBlockToScan;
+  }
+
+  /**
+   * @param numberOfBlockToScan the numberOfBlockToScan to set
+   */
+  public void setNumberOfBlockToScan(long numberOfBlockToScan) {
+    this.numberOfBlockToScan = numberOfBlockToScan;
+  }
+
+  /**
+   * @return the fixedLengthKeySize
+   */
+  public int getFixedLengthKeySize() {
+    return fixedLengthKeySize;
+  }
+
+  /**
+   * @param fixedLengthKeySize the fixedLengthKeySize to set
+   */
+  public void setFixedLengthKeySize(int fixedLengthKeySize) {
+    this.fixedLengthKeySize = fixedLengthKeySize;
+  }
+
+  /**
+   * @return the filterEvaluatorTree
+   */
+  public FilterExecuter getFilterExecuterTree() {
+    return filterExecuterTree;
+  }
+
+  /**
+   * @param filterExecuterTree the filterEvaluatorTree to set
+   */
+  public void setFilterExecuterTree(FilterExecuter filterExecuterTree) {
+    this.filterExecuterTree = filterExecuterTree;
+  }
+
+  /**
+   * @return the tableBlockKeyGenerator
+   */
+  public KeyGenerator getBlockKeyGenerator() {
+    return blockKeyGenerator;
+  }
+
+  /**
+   * @param tableBlockKeyGenerator the tableBlockKeyGenerator to set
+   */
+  public void setBlockKeyGenerator(KeyGenerator tableBlockKeyGenerator) {
+    this.blockKeyGenerator = tableBlockKeyGenerator;
+  }
+
+  /**
+   * @return the eachColumnValueSize
+   */
+  public int[] getEachColumnValueSize() {
+    return eachColumnValueSize;
+  }
+
+  /**
+   * @param eachColumnValueSize the eachColumnValueSize to set
+   */
+  public void setEachColumnValueSize(int[] eachColumnValueSize) {
+    this.eachColumnValueSize = eachColumnValueSize;
+  }
+
+  /**
+   * @return the partitionId
+   */
+  public String getPartitionId() {
+    return partitionId;
+  }
+
+  /**
+   * @param partitionId the partitionId to set
+   */
+  public void setPartitionId(String partitionId) {
+    this.partitionId = partitionId;
+  }
+
+  /**
+   * @return the dictionaryColumnBlockIndex
+   */
+  public int[] getDictionaryColumnBlockIndex() {
+    return dictionaryColumnBlockIndex;
+  }
+
+  /**
+   * @param dictionaryColumnBlockIndex the dictionaryColumnBlockIndex to set
+   */
+  public void setDictionaryColumnBlockIndex(int[] dictionaryColumnBlockIndex) {
+    this.dictionaryColumnBlockIndex = dictionaryColumnBlockIndex;
+  }
+
+  /**
+   * @return the noDictionaryBlockIndexes
+   */
+  public int[] getNoDictionaryBlockIndexes() {
+    return noDictionaryBlockIndexes;
+  }
+
+  /**
+   * @param noDictionaryBlockIndexes the noDictionaryBlockIndexes to set
+   */
+  public void setNoDictionaryBlockIndexes(int[] noDictionaryBlockIndexes) {
+    this.noDictionaryBlockIndexes = noDictionaryBlockIndexes;
+  }
+
+  /**
+   * @return the columnGroupToKeyStructureInfo
+   */
+  public Map<Integer, KeyStructureInfo> getColumnGroupToKeyStructureInfo() {
+    return columnGroupToKeyStructureInfo;
+  }
+
+  /**
+   * @param columnGroupToKeyStructureInfo the columnGroupToKeyStructureInfo to set
+   */
+  public void setColumnGroupToKeyStructureInfo(
+      Map<Integer, KeyStructureInfo> columnGroupToKeyStructureInfo) {
+    this.columnGroupToKeyStructureInfo = columnGroupToKeyStructureInfo;
+  }
+
+  /**
+   * @return the columnIdToDcitionaryMapping
+   */
+  public Map<String, Dictionary> getColumnIdToDcitionaryMapping() {
+    return columnIdToDcitionaryMapping;
+  }
+
+  /**
+   * @param columnIdToDcitionaryMapping the columnIdToDcitionaryMapping to set
+   */
+  public void setColumnIdToDcitionaryMapping(Map<String, Dictionary> columnIdToDcitionaryMapping) {
+    this.columnIdToDcitionaryMapping = columnIdToDcitionaryMapping;
+  }
+
+  /**
+   * @return the fileType
+   */
+  public FileType getFileType() {
+    return fileType;
+  }
+
+  /**
+   * @param fileType the fileType to set
+   */
+  public void setFileType(FileType fileType) {
+    this.fileType = fileType;
+  }
+
+  public boolean isRawRecordDetailQuery() {
+    return isRawRecordDetailQuery;
+  }
+
+  public void setRawRecordDetailQuery(boolean rawRecordDetailQuery) {
+    isRawRecordDetailQuery = rawRecordDetailQuery;
+  }
+
+  /**
+   * @return the complexParentIndexToQueryMap
+   */
+  public Map<Integer, GenericQueryType> getComlexDimensionInfoMap() {
+    return complexParentIndexToQueryMap;
+  }
+
+  /**
+   * @param complexDimensionInfoMap the complexParentIndexToQueryMap to set
+   */
+  public void setComplexDimensionInfoMap(Map<Integer, GenericQueryType> complexDimensionInfoMap) {
+    this.complexParentIndexToQueryMap = complexDimensionInfoMap;
+  }
+
+  /**
+   * @return the complexColumnParentBlockIndexes
+   */
+  public int[] getComplexColumnParentBlockIndexes() {
+    return complexColumnParentBlockIndexes;
+  }
+
+  /**
+   * @param complexColumnParentBlockIndexes the complexColumnParentBlockIndexes to set
+   */
+  public void setComplexColumnParentBlockIndexes(int[] complexColumnParentBlockIndexes) {
+    this.complexColumnParentBlockIndexes = complexColumnParentBlockIndexes;
+  }
+
+  public QueryStatisticsRecorder getStatisticsRecorder() {
+    return statisticsRecorder;
+  }
+
+  public void setStatisticsRecorder(QueryStatisticsRecorder statisticsRecorder) {
+    this.statisticsRecorder = statisticsRecorder;
+  }
+
+  public QueryDimension[] getQueryDimensions() {
+    return queryDimensions;
+  }
+
+  public void setQueryDimensions(QueryDimension[] queryDimensions) {
+    this.queryDimensions = queryDimensions;
+  }
+
+  public QueryMeasure[] getQueryMeasures() {
+    return queryMeasures;
+  }
+
+  public void setQueryMeasures(QueryMeasure[] queryMeasures) {
+    this.queryMeasures = queryMeasures;
+  }
+}
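
A condensed sketch of the per-block wiring an executor performs with this class; only a
few of the many fields are shown, and all values are placeholders:

    import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;

    public class BlockInfoSketch {
      static BlockExecutionInfo sketch() {
        BlockExecutionInfo info = new BlockExecutionInfo();
        info.setQueryId("query_001");  // placeholder id
        info.setLimit(-1);             // -1 assumed here to mean "no limit pushed down"
        info.setDetailQuery(true);
        info.setNumberOfBlockToScan(1);
        info.setAllSelectedDimensionBlocksIndexes(new int[] { 0, 2 });
        info.setAllSelectedMeasureBlocksIndexes(new int[] { 0 });
        return info;
      }
    }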

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/executor/infos/KeyStructureInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/executor/infos/KeyStructureInfo.java b/core/src/main/java/org/apache/carbondata/scan/executor/infos/KeyStructureInfo.java
new file mode 100644
index 0000000..ad3ff5f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/executor/infos/KeyStructureInfo.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.executor.infos;
+
+import org.apache.carbondata.core.keygenerator.KeyGenerator;
+
+/**
+ * Below class will store the structure of the key
+ * used during query execution
+ */
+public class KeyStructureInfo {
+
+  /**
+   * it is actually the key generator of the
+   * latest table block, as this key generator will be used
+   * to update the mdkey of the older slice with the new slice
+   */
+  private KeyGenerator keyGenerator;
+
+  /**
+   * mask bytes ranges for the query
+   */
+  private int[] maskByteRanges;
+
+  /**
+   * masked bytes of the query
+   */
+  private int[] maskedBytes;
+
+  /**
+   * max key for query execution
+   */
+  private byte[] maxKey;
+
+  /**
+   * dimension ordinals inside the column group
+   */
+  private int[] mdkeyQueryDimensionOrdinal;
+
+  /**
+   * @return the keyGenerator
+   */
+  public KeyGenerator getKeyGenerator() {
+    return keyGenerator;
+  }
+
+  /**
+   * @param keyGenerator the keyGenerator to set
+   */
+  public void setKeyGenerator(KeyGenerator keyGenerator) {
+    this.keyGenerator = keyGenerator;
+  }
+
+  /**
+   * @return the maskByteRanges
+   */
+  public int[] getMaskByteRanges() {
+    return maskByteRanges;
+  }
+
+  /**
+   * @param maskByteRanges the maskByteRanges to set
+   */
+  public void setMaskByteRanges(int[] maskByteRanges) {
+    this.maskByteRanges = maskByteRanges;
+  }
+
+  /**
+   * @return the maskedBytes
+   */
+  public int[] getMaskedBytes() {
+    return maskedBytes;
+  }
+
+  /**
+   * @param maskedBytes the maskedBytes to set
+   */
+  public void setMaskedBytes(int[] maskedBytes) {
+    this.maskedBytes = maskedBytes;
+  }
+
+  /**
+   * @return the maxKey
+   */
+  public byte[] getMaxKey() {
+    return maxKey;
+  }
+
+  /**
+   * @param maxKey the maxKey to set
+   */
+  public void setMaxKey(byte[] maxKey) {
+    this.maxKey = maxKey;
+  }
+
+  public int[] getMdkeyQueryDimensionOrdinal() {
+    return mdkeyQueryDimensionOrdinal;
+  }
+
+  public void setMdkeyQueryDimensionOrdinal(int[] mdkeyQueryDimensionOrdinal) {
+    this.mdkeyQueryDimensionOrdinal = mdkeyQueryDimensionOrdinal;
+  }
+}
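
To make the mask fields concrete, a sketch of the usual masking step: the byte positions
named by maskByteRanges are picked out of a full fixed-length mdkey and ANDed with
maxKey. This pattern is inferred from the field documentation above, not shown in this
hunk:

    import org.apache.carbondata.scan.executor.infos.KeyStructureInfo;

    public class MaskedKeySketch {
      // extracts the query-relevant bytes from a full fixed-length key (sketch)
      static byte[] maskKey(byte[] fullKey, KeyStructureInfo info) {
        int[] ranges = info.getMaskByteRanges();
        byte[] maxKey = info.getMaxKey();
        byte[] masked = new byte[ranges.length];
        for (int i = 0; i < ranges.length; i++) {
          masked[i] = (byte) (fullKey[ranges[i]] & maxKey[ranges[i]]);
        }
        return masked;
      }
    }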

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/executor/infos/SortInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/executor/infos/SortInfo.java b/core/src/main/java/org/apache/carbondata/scan/executor/infos/SortInfo.java
new file mode 100644
index 0000000..457e185
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/executor/infos/SortInfo.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.executor.infos;
+
+import java.util.List;
+
+import org.apache.carbondata.scan.model.QueryDimension;
+
+/**
+ * Below class holds the order by information about the query
+ */
+public class SortInfo {
+
+  /**
+   * sorting order of a dimension
+   */
+  private byte[] dimensionSortOrder;
+
+  /**
+   * byte range of each dimension present in the order by
+   */
+  private int[][] maskedByteRangeForSorting;
+
+  /**
+   * dimension indexes which are used in the order by
+   */
+  private byte[] sortDimensionIndex;
+
+  /**
+   * mask key of each dimension
+   * this will be used to sort the dimension
+   */
+  private byte[][] dimensionMaskKeyForSorting;
+
+  /**
+   * sortDimension
+   */
+  private List<QueryDimension> sortDimension;
+
+  /**
+   * @return the dimensionSortOrder
+   */
+  public byte[] getDimensionSortOrder() {
+    return dimensionSortOrder;
+  }
+
+  /**
+   * @param dimensionSortOrder the dimensionSortOrder to set
+   */
+  public void setDimensionSortOrder(byte[] dimensionSortOrder) {
+    this.dimensionSortOrder = dimensionSortOrder;
+  }
+
+  /**
+   * @return the maskedByteRangeForSorting
+   */
+  public int[][] getMaskedByteRangeForSorting() {
+    return maskedByteRangeForSorting;
+  }
+
+  /**
+   * @param maskedByteRangeForSorting the maskedByteRangeForSorting to set
+   */
+  public void setMaskedByteRangeForSorting(int[][] maskedByteRangeForSorting) {
+    this.maskedByteRangeForSorting = maskedByteRangeForSorting;
+  }
+
+  /**
+   * @return the sortDimensionIndex
+   */
+  public byte[] getSortDimensionIndex() {
+    return sortDimensionIndex;
+  }
+
+  /**
+   * @param sortDimensionIndex the sortDimensionIndex to set
+   */
+  public void setSortDimensionIndex(byte[] sortDimensionIndex) {
+    this.sortDimensionIndex = sortDimensionIndex;
+  }
+
+  /**
+   * @return the dimensionMaskKeyForSorting
+   */
+  public byte[][] getDimensionMaskKeyForSorting() {
+    return dimensionMaskKeyForSorting;
+  }
+
+  /**
+   * @param dimensionMaskKeyForSorting the dimensionMaskKeyForSorting to set
+   */
+  public void setDimensionMaskKeyForSorting(byte[][] dimensionMaskKeyForSorting) {
+    this.dimensionMaskKeyForSorting = dimensionMaskKeyForSorting;
+  }
+
+  /**
+   * @return the sortDimension
+   */
+  public List<QueryDimension> getSortDimension() {
+    return sortDimension;
+  }
+
+  /**
+   * @param sortDimension the sortDimension to set
+   */
+  public void setSortDimension(List<QueryDimension> sortDimension) {
+    this.sortDimension = sortDimension;
+  }
+}
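
A filled-in sketch for an order by over a single dimension; the byte encoding of the
sort order (0 for ascending) is an assumption here, not defined in this hunk:

    import java.util.Collections;

    import org.apache.carbondata.scan.executor.infos.SortInfo;
    import org.apache.carbondata.scan.model.QueryDimension;

    public class SortInfoSketch {
      static SortInfo singleDimensionSort(QueryDimension dim) {
        SortInfo sort = new SortInfo();
        sort.setDimensionSortOrder(new byte[] { 0 }); // 0 assumed to mean ascending
        sort.setSortDimensionIndex(new byte[] { 0 }); // first projected dimension drives the sort
        sort.setSortDimension(Collections.singletonList(dim));
        return sort;
      }
    }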

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/executor/util/QueryUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/executor/util/QueryUtil.java b/core/src/main/java/org/apache/carbondata/scan/executor/util/QueryUtil.java
new file mode 100644
index 0000000..8837012
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/executor/util/QueryUtil.java
@@ -0,0 +1,951 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.executor.util;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.carbondata.core.cache.Cache;
+import org.apache.carbondata.core.cache.CacheProvider;
+import org.apache.carbondata.core.cache.CacheType;
+import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.metadata.CarbonMetadata;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.carbon.metadata.schema.table.CarbonTable;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+import org.apache.carbondata.core.keygenerator.KeyGenerator;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.CarbonUtilException;
+import org.apache.carbondata.scan.complextypes.ArrayQueryType;
+import org.apache.carbondata.scan.complextypes.PrimitiveQueryType;
+import org.apache.carbondata.scan.complextypes.StructQueryType;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.executor.infos.KeyStructureInfo;
+import org.apache.carbondata.scan.expression.ColumnExpression;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.filter.GenericQueryType;
+import org.apache.carbondata.scan.filter.resolver.FilterResolverIntf;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.model.QueryDimension;
+import org.apache.carbondata.scan.model.QueryMeasure;
+import org.apache.carbondata.scan.model.QueryModel;
+
+import org.apache.commons.lang3.ArrayUtils;
+
+/**
+ * Utility class for query execution
+ */
+public class QueryUtil {
+
+  /**
+   * Below method will be used to get the masked byte range based on the query
+   * dimension. It will give the range in the mdkey. This will be used to get
+   * the actual key array from masked mdkey
+   *
+   * @param queryDimensions query dimension selected in query
+   * @param keyGenerator    key generator
+   * @return masked byte range
+   */
+  public static int[] getMaskedByteRange(List<QueryDimension> queryDimensions,
+      KeyGenerator keyGenerator) {
+    Set<Integer> byteRangeSet = new TreeSet<Integer>();
+    int[] byteRange = null;
+    for (int i = 0; i < queryDimensions.size(); i++) {
+
+      // no dictionary columns and complex type columns
+      // are not part of the mdkey,
+      // so we will not select those dimensions for calculating the
+      // range
+      if (queryDimensions.get(i).getDimension().getKeyOrdinal() == -1) {
+        continue;
+      }
+      // get the offset of the dimension in the mdkey
+      byteRange =
+          keyGenerator.getKeyByteOffsets(queryDimensions.get(i).getDimension().getKeyOrdinal());
+      for (int j = byteRange[0]; j <= byteRange[1]; j++) {
+        byteRangeSet.add(j);
+      }
+    }
+    int[] maskedByteRange = new int[byteRangeSet.size()];
+    int index = 0;
+    Iterator<Integer> iterator = byteRangeSet.iterator();
+    // add the masked byte range
+    while (iterator.hasNext()) {
+      maskedByteRange[index++] = iterator.next();
+    }
+    return maskedByteRange;
+  }
+
+  public static int[] getMaskedByteRangeBasedOrdinal(List<Integer> ordinals,
+      KeyGenerator keyGenerator) {
+    Set<Integer> byteRangeSet = new TreeSet<Integer>();
+    int[] byteRange = null;
+    for (int i = 0; i < ordinals.size(); i++) {
+
+      // get the offset of the dimension in the mdkey
+      byteRange = keyGenerator.getKeyByteOffsets(ordinals.get(i));
+      for (int j = byteRange[0]; j <= byteRange[1]; j++) {
+        byteRangeSet.add(j);
+      }
+    }
+    int[] maskedByteRange = new int[byteRangeSet.size()];
+    int index = 0;
+    Iterator<Integer> iterator = byteRangeSet.iterator();
+    // add the masked byte range
+    while (iterator.hasNext()) {
+      maskedByteRange[index++] = iterator.next();
+    }
+    return maskedByteRange;
+  }
+
+  /**
+   * Below method will return the max key based on the dimension ordinal
+   *
+   * @param keyOrdinalList ordinals of the dimensions selected in the query
+   * @param generator      key generator
+   * @return max key for the given ordinals
+   * @throws KeyGenException
+   */
+  public static byte[] getMaxKeyBasedOnOrinal(List<Integer> keyOrdinalList, KeyGenerator generator)
+      throws KeyGenException {
+    long[] max = new long[generator.getDimCount()];
+    Arrays.fill(max, 0L);
+
+    for (int i = 0; i < keyOrdinalList.size(); i++) {
+      // adding for dimension which is selected in query
+      max[keyOrdinalList.get(i)] = Long.MAX_VALUE;
+    }
+    return generator.generateKey(max);
+  }
+
+  /**
+   * To get the max key based on dimensions. i.e. all other dimensions will be
+   * set to 0 bits and the required query dimensions will be masked with
+   * Long.MAX_VALUE so that we can mask the key and then compare while
+   * aggregating. This can be useful during a filter query when only a few
+   * dimensions were selected out of a row group
+   *
+   * @param queryDimensions dimension selected in query
+   * @param generator       key generator
+   * @return max key for dimension
+   * @throws KeyGenException if any problem while generating the key
+   */
+  public static byte[] getMaxKeyBasedOnDimensions(List<QueryDimension> queryDimensions,
+      KeyGenerator generator) throws KeyGenException {
+    long[] max = new long[generator.getDimCount()];
+    Arrays.fill(max, 0L);
+
+    for (int i = 0; i < queryDimensions.size(); i++) {
+      // no dictionary columns and complex type columns
+      // are not part of the mdkey,
+      // so we will not select those dimensions for calculating the
+      // range
+      if (queryDimensions.get(i).getDimension().getKeyOrdinal() == -1) {
+        continue;
+      }
+      // adding for dimension which is selected in query
+      max[queryDimensions.get(i).getDimension().getKeyOrdinal()] = Long.MAX_VALUE;
+    }
+
+    return generator.generateKey(max);
+  }
+
+  /**
+   * Below method will be used to get the masked key for query
+   *
+   * @param keySize         size of the masked key
+   * @param maskedKeyRanges masked byte range
+   * @return masked bytes
+   */
+  public static int[] getMaskedByte(int keySize, int[] maskedKeyRanges) {
+    int[] maskedKey = new int[keySize];
+    // all the non-selected positions will be filled with -1
+    Arrays.fill(maskedKey, -1);
+    for (int i = 0; i < maskedKeyRanges.length; i++) {
+      maskedKey[maskedKeyRanges[i]] = i;
+    }
+    return maskedKey;
+  }
+
+  /**
+   * Below method will be used to get the dimension block index in file based
+   * on query dimension
+   *
+   * @param queryDimensions                query dimension
+   * @param dimensionOrdinalToBlockMapping mapping of dimension block in file to query dimension
+   * @return block index of file
+   */
+  public static int[] getDimensionsBlockIndexes(List<QueryDimension> queryDimensions,
+      Map<Integer, Integer> dimensionOrdinalToBlockMapping,
+      List<CarbonDimension> customAggregationDimension) {
+    // using a set as columns in a row group will point to the same block
+    Set<Integer> dimensionBlockIndex = new HashSet<Integer>();
+    int blockIndex = 0;
+    for (int i = 0; i < queryDimensions.size(); i++) {
+      blockIndex =
+          dimensionOrdinalToBlockMapping.get(queryDimensions.get(i).getDimension().getOrdinal());
+      dimensionBlockIndex.add(blockIndex);
+      if (queryDimensions.get(i).getDimension().numberOfChild() > 0) {
+        addChildrenBlockIndex(dimensionBlockIndex, queryDimensions.get(i).getDimension());
+      }
+    }
+    for (int i = 0; i < customAggregationDimension.size(); i++) {
+      blockIndex =
+          dimensionOrdinalToBlockMapping.get(customAggregationDimension.get(i).getOrdinal());
+      // not adding the children dimensions, as dimension aggregation
+      // is not pushed down in case of complex dimensions
+      dimensionBlockIndex.add(blockIndex);
+    }
+    return ArrayUtils
+        .toPrimitive(dimensionBlockIndex.toArray(new Integer[dimensionBlockIndex.size()]));
+  }
+
+  /**
+   * Below method will be used to add the children block index
+   * this will be basically for complex dimension which will have children
+   *
+   * @param blockIndexes block indexes
+   * @param dimension    parent dimension
+   */
+  private static void addChildrenBlockIndex(Set<Integer> blockIndexes, CarbonDimension dimension) {
+    for (int i = 0; i < dimension.numberOfChild(); i++) {
+      addChildrenBlockIndex(blockIndexes, dimension.getListOfChildDimensions().get(i));
+      blockIndexes.add(dimension.getListOfChildDimensions().get(i).getOrdinal());
+    }
+  }
+
+  /**
+   * Below method will be used to get the dictionary mapping for all the
+   * dictionary encoded dimension present in the query
+   *
+   * @param queryDimensions            query dimensions present in the query; this will be used to
+   *                                   convert the result from surrogate key to actual data
+   * @param absoluteTableIdentifier    absolute table identifier
+   * @return dimension unique id to its dictionary map
+   * @throws QueryExecutionException
+   */
+  public static Map<String, Dictionary> getDimensionDictionaryDetail(
+      List<QueryDimension> queryDimensions,
+      Set<CarbonDimension> filterComplexDimensions,
+      AbsoluteTableIdentifier absoluteTableIdentifier) throws QueryExecutionException {
+    // to store the dimension unique column id list; this is required as a
+    // dimension can be present in
+    // the query dimensions, and some aggregation function may be applied
+    // on the same dimension,
+    // so we need to get only one instance of the dictionary.
+    // direct dictionary skip is done only for the dictionary lookup
+    Set<String> dictionaryDimensionFromQuery = new HashSet<String>();
+    for (int i = 0; i < queryDimensions.size(); i++) {
+      List<Encoding> encodingList = queryDimensions.get(i).getDimension().getEncoder();
+      // TODO: need to remove the data type check for parent column in complex type; no need to
+      // write dictionary encoding
+      if (CarbonUtil.hasEncoding(encodingList, Encoding.DICTIONARY) && !CarbonUtil
+          .hasEncoding(encodingList, Encoding.DIRECT_DICTIONARY)) {
+
+        if (queryDimensions.get(i).getDimension().numberOfChild() == 0) {
+          dictionaryDimensionFromQuery.add(queryDimensions.get(i).getDimension().getColumnId());
+        }
+        if (queryDimensions.get(i).getDimension().numberOfChild() > 0) {
+          getChildDimensionDictionaryDetail(queryDimensions.get(i).getDimension(),
+              dictionaryDimensionFromQuery);
+        }
+      }
+    }
+    Iterator<CarbonDimension> iterator = filterComplexDimensions.iterator();
+    while (iterator.hasNext()) {
+      getChildDimensionDictionaryDetail(iterator.next(), dictionaryDimensionFromQuery);
+    }
+    // converting to a list as the exposed api needs a list, which
+    // is arguably not correct
+    List<String> dictionaryColumnIdList =
+        new ArrayList<String>(dictionaryDimensionFromQuery.size());
+    dictionaryColumnIdList.addAll(dictionaryDimensionFromQuery);
+    return getDictionaryMap(dictionaryColumnIdList, absoluteTableIdentifier);
+  }
+
+  /**
+   * Below method will be used to fill the children dimension column ids
+   *
+   * @param queryDimensions              parent dimension whose children are traversed
+   * @param dictionaryDimensionFromQuery set collecting dictionary column ids for the query
+   */
+  private static void getChildDimensionDictionaryDetail(CarbonDimension queryDimensions,
+      Set<String> dictionaryDimensionFromQuery) {
+    for (int j = 0; j < queryDimensions.numberOfChild(); j++) {
+      List<Encoding> encodingList = queryDimensions.getListOfChildDimensions().get(j).getEncoder();
+      if (queryDimensions.getListOfChildDimensions().get(j).numberOfChild() > 0) {
+        getChildDimensionDictionaryDetail(queryDimensions.getListOfChildDimensions().get(j),
+            dictionaryDimensionFromQuery);
+      } else if (!CarbonUtil.hasEncoding(encodingList, Encoding.DIRECT_DICTIONARY)) {
+        dictionaryDimensionFromQuery
+            .add(queryDimensions.getListOfChildDimensions().get(j).getColumnId());
+      }
+    }
+  }
+
+  /**
+   * Below method will be used to get the column id to its dictionary mapping
+   *
+   * @param dictionaryColumnIdList  dictionary column list
+   * @param absoluteTableIdentifier absolute table identifier
+   * @return dictionary mapping
+   * @throws QueryExecutionException
+   */
+  private static Map<String, Dictionary> getDictionaryMap(List<String> dictionaryColumnIdList,
+      AbsoluteTableIdentifier absoluteTableIdentifier) throws QueryExecutionException {
+    // this for dictionary unique identifier
+    List<DictionaryColumnUniqueIdentifier> dictionaryColumnUniqueIdentifiers =
+        getDictionaryColumnUniqueIdentifierList(dictionaryColumnIdList,
+            absoluteTableIdentifier.getCarbonTableIdentifier());
+    CacheProvider cacheProvider = CacheProvider.getInstance();
+    Cache forwardDictionaryCache = cacheProvider
+        .createCache(CacheType.FORWARD_DICTIONARY, absoluteTableIdentifier.getStorePath());
+    List<Dictionary> columnDictionaryList = null;
+    try {
+      columnDictionaryList = forwardDictionaryCache.getAll(dictionaryColumnUniqueIdentifiers);
+    } catch (CarbonUtilException e) {
+      throw new QueryExecutionException(e);
+    }
+    Map<String, Dictionary> columnDictionaryMap = new HashMap<>(columnDictionaryList.size());
+    for (int i = 0; i < dictionaryColumnUniqueIdentifiers.size(); i++) {
+      // TODO: null check for column dictionary, if cache size is less it
+      // might return null here, in that case throw exception
+      columnDictionaryMap.put(dictionaryColumnIdList.get(i), columnDictionaryList.get(i));
+    }
+    return columnDictionaryMap;
+  }
+
+  /**
+   * Below method will be used to get the dictionary column unique identifier
+   *
+   * @param dictionaryColumnIdList dictionary column ids
+   * @param carbonTableIdentifier  carbon table identifier
+   * @return list of dictionary column unique identifiers
+   */
+  private static List<DictionaryColumnUniqueIdentifier> getDictionaryColumnUniqueIdentifierList(
+      List<String> dictionaryColumnIdList, CarbonTableIdentifier carbonTableIdentifier)
+      throws QueryExecutionException {
+    CarbonTable carbonTable =
+        CarbonMetadata.getInstance().getCarbonTable(carbonTableIdentifier.getTableUniqueName());
+    List<DictionaryColumnUniqueIdentifier> dictionaryColumnUniqueIdentifiers =
+        new ArrayList<>(dictionaryColumnIdList.size());
+    for (String columnId : dictionaryColumnIdList) {
+      CarbonDimension dimension = CarbonMetadata.getInstance()
+          .getCarbonDimensionBasedOnColIdentifier(carbonTable, columnId);
+      if (null == dimension) {
+        throw new QueryExecutionException("The column id " + columnId + " could not be resolved.");
+      }
+      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier =
+          new DictionaryColumnUniqueIdentifier(carbonTableIdentifier,
+              dimension.getColumnIdentifier(), dimension.getDataType());
+      dictionaryColumnUniqueIdentifiers.add(dictionaryColumnUniqueIdentifier);
+    }
+    return dictionaryColumnUniqueIdentifiers;
+  }
+
+  /**
+   * Below method will be used to get the measure
+   * block indexes to be read from the file
+   *
+   * @param queryMeasures              query measure
+   * @param expressionMeasure          measure present in the expression
+   * @param ordinalToBlockIndexMapping measure ordinal to block mapping
+   * @return block indexes
+   */
+  public static int[] getMeasureBlockIndexes(List<QueryMeasure> queryMeasures,
+      List<CarbonMeasure> expressionMeasure, Map<Integer, Integer> ordinalToBlockIndexMapping) {
+    Set<Integer> measureBlockIndex = new HashSet<Integer>();
+    for (int i = 0; i < queryMeasures.size(); i++) {
+      measureBlockIndex
+          .add(ordinalToBlockIndexMapping.get(queryMeasures.get(i).getMeasure().getOrdinal()));
+    }
+    for (int i = 0; i < expressionMeasure.size(); i++) {
+      measureBlockIndex.add(ordinalToBlockIndexMapping.get(expressionMeasure.get(i).getOrdinal()));
+    }
+    return ArrayUtils.toPrimitive(measureBlockIndex.toArray(new Integer[measureBlockIndex.size()]));
+  }
+
+  /**
+   * Below method will be used to get the masked byte range for dimension
+   * which is present in order by
+   *
+   * @param orderByDimensions order by dimension
+   * @param generator         key generator
+   * @param maskedRanges      masked byte range for dimension
+   * @return range of masked byte for order by dimension
+   */
+  public static int[][] getMaskedByteRangeForSorting(List<QueryDimension> orderByDimensions,
+      KeyGenerator generator, int[] maskedRanges) {
+    int[][] dimensionCompareIndex = new int[orderByDimensions.size()][];
+    int index = 0;
+    for (int i = 0; i < dimensionCompareIndex.length; i++) {
+      Set<Integer> integers = new TreeSet<Integer>();
+      if (!orderByDimensions.get(i).getDimension().getEncoder().contains(Encoding.DICTIONARY)
+          || orderByDimensions.get(i).getDimension().numberOfChild() > 0) {
+        continue;
+      }
+      int[] range =
+          generator.getKeyByteOffsets(orderByDimensions.get(i).getDimension().getKeyOrdinal());
+      for (int j = range[0]; j <= range[1]; j++) {
+        integers.add(j);
+      }
+      dimensionCompareIndex[index] = new int[integers.size()];
+      int j = 0;
+      for (Integer integer : integers) {
+        dimensionCompareIndex[index][j++] = integer.intValue();
+      }
+      index++;
+    }
+    for (int i = 0; i < dimensionCompareIndex.length; i++) {
+      if (null == dimensionCompareIndex[i]) {
+        continue;
+      }
+      int[] range = dimensionCompareIndex[i];
+      for (int j = 0; j < range.length; j++) {
+        for (int k = 0; k < maskedRanges.length; k++) {
+          if (range[j] == maskedRanges[k]) {
+            range[j] = k;
+            break;
+          }
+        }
+      }
+    }
+    return dimensionCompareIndex;
+  }
+
+  /**
+   * Below method will be used to get the masked key for sorting
+   *
+   * @param orderDimensions           query dimension
+   * @param generator                 key generator
+   * @param maskedByteRangeForSorting masked byte range for sorting
+   * @param maskedRanges              masked range
+   * @return masked keys for sorting
+   * @throws QueryExecutionException
+   */
+  public static byte[][] getMaksedKeyForSorting(List<QueryDimension> orderDimensions,
+      KeyGenerator generator, int[][] maskedByteRangeForSorting, int[] maskedRanges)
+      throws QueryExecutionException {
+    byte[][] maskedKey = new byte[orderDimensions.size()][];
+    byte[] mdKey = null;
+    long[] key = null;
+    byte[] maskedMdKey = null;
+    try {
+      if (null != maskedByteRangeForSorting) {
+        for (int i = 0; i < maskedByteRangeForSorting.length; i++) {
+          if (null == maskedByteRangeForSorting[i]) {
+            continue;
+          }
+          key = new long[generator.getDimCount()];
+          maskedKey[i] = new byte[maskedByteRangeForSorting[i].length];
+          key[orderDimensions.get(i).getDimension().getKeyOrdinal()] = Long.MAX_VALUE;
+          mdKey = generator.generateKey(key);
+          maskedMdKey = new byte[maskedRanges.length];
+          for (int k = 0; k < maskedMdKey.length; k++) { // CHECKSTYLE:OFF
+            maskedMdKey[k] = mdKey[maskedRanges[k]];
+          }
+          for (int j = 0; j < maskedByteRangeForSorting[i].length; j++) {
+            maskedKey[i][j] = maskedMdKey[maskedByteRangeForSorting[i][j]];
+          }// CHECKSTYLE:ON
+
+        }
+      }
+    } catch (KeyGenException e) {
+      throw new QueryExecutionException(e);
+    }
+    return maskedKey;
+  }
+
+  /**
+   * Below method will be used to get mapping whether dimension is present in
+   * order by or not
+   *
+   * @param sortedDimensions sort dimension present in order by query
+   * @param queryDimensions  query dimension
+   * @return sort dimension indexes
+   */
+  public static byte[] getSortDimensionIndexes(List<QueryDimension> sortedDimensions,
+      List<QueryDimension> queryDimensions) {
+    byte[] sortedDims = new byte[queryDimensions.size()];
+    int indexOf = 0;
+    for (int i = 0; i < sortedDims.length; i++) {
+      indexOf = sortedDimensions.indexOf(queryDimensions.get(i));
+      if (indexOf > -1) {
+        sortedDims[i] = 1;
+      }
+    }
+    return sortedDims;
+  }
+
+  /**
+   * Below method will be used to get the mapping of block index and its
+   * restructuring info
+   *
+   * @param queryDimensions   query dimension from query model
+   * @param segmentProperties segment properties
+   * @return map of block index to its restructuring info
+   * @throws KeyGenException if problem while key generation
+   */
+  public static Map<Integer, KeyStructureInfo> getColumnGroupKeyStructureInfo(
+      List<QueryDimension> queryDimensions, SegmentProperties segmentProperties)
+      throws KeyGenException {
+    Map<Integer, KeyStructureInfo> rowGroupToItsRSInfo = new HashMap<Integer, KeyStructureInfo>();
+    // get column group id and its ordinal mapping of column group
+    Map<Integer, List<Integer>> columnGroupAndItsOrdinalMappingForQuery =
+        getColumnGroupAndItsOrdinalMapping(queryDimensions);
+    Map<Integer, KeyGenerator> columnGroupAndItsKeygenartor =
+        segmentProperties.getColumnGroupAndItsKeygenartor();
+
+    Iterator<Entry<Integer, List<Integer>>> iterator =
+        columnGroupAndItsOrdinalMappingForQuery.entrySet().iterator();
+    KeyStructureInfo restructureInfos = null;
+    while (iterator.hasNext()) {
+      Entry<Integer, List<Integer>> next = iterator.next();
+      KeyGenerator keyGenerator = columnGroupAndItsKeygenartor.get(next.getKey());
+      restructureInfos = new KeyStructureInfo();
+      // sort the ordinal
+      List<Integer> ordinal = next.getValue();
+      List<Integer> mdKeyOrdinal = new ArrayList<Integer>();
+      // unsorted, kept in query order
+      List<Integer> mdKeyOrdinalForQuery = new ArrayList<Integer>();
+      for (Integer ord : ordinal) {
+        mdKeyOrdinal.add(segmentProperties.getColumnGroupMdKeyOrdinal(next.getKey(), ord));
+        mdKeyOrdinalForQuery.add(segmentProperties.getColumnGroupMdKeyOrdinal(next.getKey(), ord));
+      }
+      Collections.sort(mdKeyOrdinal);
+      // get the masked byte range for column group
+      int[] maskByteRanges = getMaskedByteRangeBasedOrdinal(mdKeyOrdinal, keyGenerator);
+      // max key for column group
+      byte[] maxKey = getMaxKeyBasedOnOrinal(mdKeyOrdinal, keyGenerator);
+      // get masked key for column group
+      int[] maskedByte = getMaskedByte(keyGenerator.getKeySizeInBytes(), maskByteRanges);
+      restructureInfos.setKeyGenerator(keyGenerator);
+      restructureInfos.setMaskByteRanges(maskByteRanges);
+      restructureInfos.setMaxKey(maxKey);
+      restructureInfos.setMaskedBytes(maskedByte);
+      restructureInfos.setMdkeyQueryDimensionOrdinal(ArrayUtils
+          .toPrimitive(mdKeyOrdinalForQuery.toArray(new Integer[mdKeyOrdinalForQuery.size()])));
+      rowGroupToItsRSInfo
+          .put(segmentProperties.getDimensionOrdinalToBlockMapping().get(ordinal.get(0)),
+              restructureInfos);
+    }
+    return rowGroupToItsRSInfo;
+  }
+
+  /**
+   * return true if given key is found in array
+   *
+   * @param data
+   * @param key
+   * @return
+   */
+  public static boolean searchInArray(int[] data, int key) {
+    for (int i = 0; i < data.length; i++) {
+      if (key == data[i]) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Below method will be used to create a mapping for column group columns:
+   * column group id to all the dimension ordinals present in that column
+   * group. The mapping is used during query execution to create a mask key
+   * for the column group dimensions, which is needed in aggregation and
+   * filter queries because column group dimensions are stored at bit level.
+   */
+  private static Map<Integer, List<Integer>> getColumnGroupAndItsOrdinalMapping(
+      List<QueryDimension> origdimensions) {
+
+    List<QueryDimension> dimensions = new ArrayList<QueryDimension>(origdimensions.size());
+    dimensions.addAll(origdimensions);
+    /**
+     * sort based on column group id
+     */
+    Collections.sort(dimensions, new Comparator<QueryDimension>() {
+
+      @Override public int compare(QueryDimension o1, QueryDimension o2) {
+        return Integer
+            .compare(o1.getDimension().columnGroupId(), o2.getDimension().columnGroupId());
+      }
+    });
+    // list of row groups this will store all the row group column
+    Map<Integer, List<Integer>> columnGroupAndItsOrdinalsMapping =
+        new HashMap<Integer, List<Integer>>();
+    // to store a column group
+    List<Integer> currentColumnGroup = null;
+    // current index
+    int index = 0;
+    // previous column group id, used to check whether all the columns of a
+    // column group have been selected
+    int prvColumnGroupId = -1;
+    while (index < dimensions.size()) {
+      // if the dimension is not columnar and its column group id is the same
+      // as the previous one, then add its ordinal to the current column
+      // group, as it belongs to the same group
+      if (!dimensions.get(index).getDimension().isColumnar()
+          && dimensions.get(index).getDimension().columnGroupId() == prvColumnGroupId
+          && null != currentColumnGroup) {
+        currentColumnGroup.add(dimensions.get(index).getDimension().getOrdinal());
+      }
+
+      // if dimension is not a columnar then it is column group column
+      else if (!dimensions.get(index).getDimension().isColumnar()) {
+        currentColumnGroup = new ArrayList<Integer>();
+        columnGroupAndItsOrdinalsMapping
+            .put(dimensions.get(index).getDimension().columnGroupId(), currentColumnGroup);
+        currentColumnGroup.add(dimensions.get(index).getDimension().getOrdinal());
+      }
+      // update the previous column group id every time; this is required to
+      // group the columns of the same column group
+      prvColumnGroupId = dimensions.get(index).getDimension().columnGroupId();
+      index++;
+    }
+    return columnGroupAndItsOrdinalsMapping;
+  }
+
+  /**
+   * Below method will be used to get masked byte
+   *
+   * @param data           actual data
+   * @param maxKey         max key
+   * @param maskByteRanges mask byte range
+   * @param byteCount
+   * @return masked byte
+   */
+  public static byte[] getMaskedKey(byte[] data, byte[] maxKey, int[] maskByteRanges,
+      int byteCount) {
+    byte[] maskedKey = new byte[byteCount];
+    int counter = 0;
+    int byteRange = 0;
+    for (int i = 0; i < byteCount; i++) {
+      byteRange = maskByteRanges[i];
+      if (byteRange != -1) {
+        maskedKey[counter++] = (byte) (data[byteRange] & maxKey[byteRange]);
+      }
+    }
+    return maskedKey;
+  }
+
+  /**
+   * Below method will be used to fill the block indexes of the query
+   * dimensions, which are used when creating an output row. Two lists are
+   * passed to store the indexes, one for dictionary columns and one for
+   * no-dictionary columns, so that both kinds of dimension block indexes
+   * can be filled in a single iteration.
+   *
+   * @param queryDimensions                  dimension present in the query
+   * @param columnOrdinalToBlockIndexMapping column ordinal to block index mapping
+   * @param dictionaryDimensionBlockIndex    list to store dictionary column block indexes
+   * @param noDictionaryDimensionBlockIndex  list to store no dictionary block indexes
+   */
+  public static void fillQueryDimensionsBlockIndexes(List<QueryDimension> queryDimensions,
+      Map<Integer, Integer> columnOrdinalToBlockIndexMapping,
+      Set<Integer> dictionaryDimensionBlockIndex, List<Integer> noDictionaryDimensionBlockIndex) {
+    for (QueryDimension queryDimension : queryDimensions) {
+      if (CarbonUtil.hasEncoding(queryDimension.getDimension().getEncoder(), Encoding.DICTIONARY)
+          && queryDimension.getDimension().numberOfChild() == 0) {
+        dictionaryDimensionBlockIndex
+            .add(columnOrdinalToBlockIndexMapping.get(queryDimension.getDimension().getOrdinal()));
+      } else if (queryDimension.getDimension().numberOfChild() == 0) {
+        noDictionaryDimensionBlockIndex
+            .add(columnOrdinalToBlockIndexMapping.get(queryDimension.getDimension().getOrdinal()));
+      }
+    }
+  }
+
+  /**
+   * Below method will be used to resolve the query model.
+   * Resolving sets the actual dimension and measure objects, as only the
+   * column names are passed from the driver to avoid heavy object
+   * serialization.
+   *
+   * @param queryModel query model
+   */
+  public static void resolveQueryModel(QueryModel queryModel) {
+    CarbonMetadata.getInstance().addCarbonTable(queryModel.getTable());
+    // TODO need to load the table from table identifier
+    CarbonTable carbonTable = queryModel.getTable();
+    String tableName =
+        queryModel.getAbsoluteTableIdentifier().getCarbonTableIdentifier().getTableName();
+    // resolve query dimension
+    for (QueryDimension queryDimension : queryModel.getQueryDimension()) {
+      queryDimension
+          .setDimension(carbonTable.getDimensionByName(tableName, queryDimension.getColumnName()));
+    }
+    // resolve sort dimension
+    for (QueryDimension sortDimension : queryModel.getSortDimension()) {
+      sortDimension
+          .setDimension(carbonTable.getDimensionByName(tableName, sortDimension.getColumnName()));
+    }
+    // resolve query measure
+    for (QueryMeasure queryMeasure : queryModel.getQueryMeasures()) {
+      // in case of count(*) the column name will be "count(*)", so first
+      // check whether any measure is present; if a measure is present and
+      // the first measure is not the default invisible measure, use it,
+      // otherwise add the first dimension as a measure.
+      // currently a default measure is always added when no measure is
+      // present, so the first condition never fails, but the check is kept
+      // in case that behaviour changes in future
+      if (queryMeasure.getColumnName().equals("count(*)")) {
+        if (carbonTable.getMeasureByTableName(tableName).size() > 0 && !carbonTable
+            .getMeasureByTableName(tableName).get(0).getColName()
+            .equals(CarbonCommonConstants.DEFAULT_INVISIBLE_DUMMY_MEASURE)) {
+          queryMeasure.setMeasure(carbonTable.getMeasureByTableName(tableName).get(0));
+        } else {
+          CarbonMeasure dummyMeasure = new CarbonMeasure(
+              carbonTable.getDimensionByTableName(tableName).get(0).getColumnSchema(), 0);
+          queryMeasure.setMeasure(dummyMeasure);
+        }
+      } else {
+        queryMeasure
+            .setMeasure(carbonTable.getMeasureByName(tableName, queryMeasure.getColumnName()));
+      }
+    }
+  }
+
+  /**
+   * Below method will be used to get the index of number type aggregator
+   *
+   * @param aggType
+   * @return index in aggregator
+   */
+  public static int[] getNumberTypeIndex(List<String> aggType) {
+    List<Integer> indexList = new ArrayList<Integer>();
+    for (int i = 0; i < aggType.size(); i++) {
+      if (CarbonCommonConstants.SUM.equals(aggType.get(i)) || CarbonCommonConstants.AVERAGE
+          .equals(aggType.get(i))) {
+        indexList.add(i);
+      }
+    }
+    return ArrayUtils.toPrimitive(indexList.toArray(new Integer[indexList.size()]));
+  }
+
+  /**
+   * Below method will be used to get the actual type aggregator
+   *
+   * @param aggType
+   * @return index in aggregator
+   */
+  public static int[] getActualTypeIndex(List<String> aggType) {
+    List<Integer> indexList = new ArrayList<Integer>();
+    for (int i = 0; i < aggType.size(); i++) {
+      if (!CarbonCommonConstants.SUM.equals(aggType.get(i)) && !CarbonCommonConstants.AVERAGE
+          .equals(aggType.get(i))) {
+        indexList.add(i);
+      }
+    }
+    return ArrayUtils.toPrimitive(indexList.toArray(new Integer[indexList.size()]));
+  }
+
+  /**
+   * Below method will be used to get the key structure for the column group
+   *
+   * @param segmentProperties      segment properties
+   * @param dimColumnEvaluatorInfo dimension evaluator info
+   * @return key structure info for column group dimension
+   * @throws KeyGenException
+   */
+  public static KeyStructureInfo getKeyStructureInfo(SegmentProperties segmentProperties,
+      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo) throws KeyGenException {
+    int colGrpId = getColumnGroupId(segmentProperties, dimColumnEvaluatorInfo.getColumnIndex());
+    KeyGenerator keyGenerator = segmentProperties.getColumnGroupAndItsKeygenartor().get(colGrpId);
+    List<Integer> mdKeyOrdinal = new ArrayList<Integer>();
+
+    mdKeyOrdinal.add(segmentProperties
+        .getColumnGroupMdKeyOrdinal(colGrpId, dimColumnEvaluatorInfo.getColumnIndex()));
+    int[] maskByteRanges = QueryUtil.getMaskedByteRangeBasedOrdinal(mdKeyOrdinal, keyGenerator);
+    byte[] maxKey = QueryUtil.getMaxKeyBasedOnOrinal(mdKeyOrdinal, keyGenerator);
+    int[] maskedByte = QueryUtil.getMaskedByte(keyGenerator.getKeySizeInBytes(), maskByteRanges);
+    KeyStructureInfo restructureInfos = new KeyStructureInfo();
+    restructureInfos.setKeyGenerator(keyGenerator);
+    restructureInfos.setMaskByteRanges(maskByteRanges);
+    restructureInfos.setMaxKey(maxKey);
+    restructureInfos.setMaskedBytes(maskedByte);
+    return restructureInfos;
+  }
+
+  /**
+   * Below method will be used to get the column group id based on the ordinal
+   *
+   * @param segmentProperties segment properties
+   * @param ordinal           ordinal to be searched
+   * @return column group id
+   */
+  public static int getColumnGroupId(SegmentProperties segmentProperties, int ordinal) {
+    int[][] columnGroups = segmentProperties.getColumnGroups();
+    int colGrpId = -1;
+    for (int i = 0; i < columnGroups.length; i++) {
+      if (columnGroups[i].length > 1) {
+        colGrpId++;
+        if (QueryUtil.searchInArray(columnGroups[i], ordinal)) {
+          break;
+        }
+      }
+    }
+    return colGrpId;
+  }
+
+  /**
+   * Below method will be used to get the map of complex dimensions and their
+   * query types, which will be used during query execution
+   *
+   * @param queryDimensions          complex dimension in query
+   * @param dimensionToBlockIndexMap dimension to block index in file map
+   * @return complex dimension and query type
+   */
+  public static Map<Integer, GenericQueryType> getComplexDimensionsMap(
+      List<QueryDimension> queryDimensions, Map<Integer, Integer> dimensionToBlockIndexMap,
+      int[] eachComplexColumnValueSize, Map<String, Dictionary> columnIdToDictionaryMap,
+      Set<CarbonDimension> filterDimensions) {
+    Map<Integer, GenericQueryType> complexTypeMap = new HashMap<Integer, GenericQueryType>();
+    for (QueryDimension dimension : queryDimensions) {
+      CarbonDimension actualDimension = dimension.getDimension();
+      if (actualDimension.getNumberOfChild() == 0) {
+        continue;
+      }
+      fillParentDetails(dimensionToBlockIndexMap, actualDimension, complexTypeMap,
+          eachComplexColumnValueSize, columnIdToDictionaryMap);
+    }
+    if (null != filterDimensions) {
+      for (CarbonDimension filterDimension : filterDimensions) {
+        fillParentDetails(dimensionToBlockIndexMap, filterDimension, complexTypeMap,
+            eachComplexColumnValueSize, columnIdToDictionaryMap);
+      }
+    }
+    return complexTypeMap;
+  }
+
+  private static GenericQueryType fillParentDetails(Map<Integer, Integer> dimensionToBlockIndexMap,
+      CarbonDimension dimension, Map<Integer, GenericQueryType> complexTypeMap,
+      int[] eachComplexColumnValueSize, Map<String, Dictionary> columnIdToDictionaryMap) {
+    int parentBlockIndex = dimensionToBlockIndexMap.get(dimension.getOrdinal());
+    GenericQueryType parentQueryType = dimension.getDataType().equals(DataType.ARRAY) ?
+        new ArrayQueryType(dimension.getColName(), dimension.getColName(), parentBlockIndex) :
+        new StructQueryType(dimension.getColName(), dimension.getColName(), parentBlockIndex);
+    complexTypeMap.put(dimension.getOrdinal(), parentQueryType);
+    parentBlockIndex =
+        fillChildrenDetails(eachComplexColumnValueSize, columnIdToDictionaryMap, parentBlockIndex,
+            dimension, parentQueryType);
+    return parentQueryType;
+  }
+
+  private static int fillChildrenDetails(int[] eachComplexColumnValueSize,
+      Map<String, Dictionary> columnIdToDictionaryMap, int parentBlockIndex,
+      CarbonDimension dimension, GenericQueryType parentQueryType) {
+    for (int i = 0; i < dimension.getNumberOfChild(); i++) {
+      switch (dimension.getListOfChildDimensions().get(i).getDataType()) {
+        case ARRAY:
+          parentQueryType.addChildren(
+              new ArrayQueryType(dimension.getListOfChildDimensions().get(i).getColName(),
+                  dimension.getColName(), ++parentBlockIndex));
+          break;
+        case STRUCT:
+          parentQueryType.addChildren(
+              new StructQueryType(dimension.getListOfChildDimensions().get(i).getColName(),
+                  dimension.getColName(), ++parentBlockIndex));
+          break;
+        default:
+          boolean isDirectDictionary = CarbonUtil
+              .hasEncoding(dimension.getListOfChildDimensions().get(i).getEncoder(),
+                  Encoding.DIRECT_DICTIONARY);
+          parentQueryType.addChildren(
+              new PrimitiveQueryType(dimension.getListOfChildDimensions().get(i).getColName(),
+                  dimension.getColName(), ++parentBlockIndex,
+                  dimension.getListOfChildDimensions().get(i).getDataType(),
+                  eachComplexColumnValueSize[dimension.getListOfChildDimensions().get(i)
+                      .getComplexTypeOrdinal()], columnIdToDictionaryMap
+                  .get(dimension.getListOfChildDimensions().get(i).getColumnId()),
+                  isDirectDictionary));
+      }
+      if (dimension.getListOfChildDimensions().get(i).getNumberOfChild() > 0) {
+        parentBlockIndex = fillChildrenDetails(eachComplexColumnValueSize, columnIdToDictionaryMap,
+            parentBlockIndex, dimension.getListOfChildDimensions().get(i), parentQueryType);
+      }
+    }
+    return parentBlockIndex;
+  }
+
+  public static Set<CarbonDimension> getAllFilterDimensions(FilterResolverIntf filterResolverTree) {
+    Set<CarbonDimension> filterDimensions = new HashSet<CarbonDimension>();
+    if (null == filterResolverTree) {
+      return filterDimensions;
+    }
+    Expression filterExpression = filterResolverTree.getFilterExpression();
+    addColumnDimensions(filterExpression, filterDimensions);
+    return filterDimensions;
+
+  }
+
+  /**
+   * This method will check if a given expression contains a column expression
+   * recursively and add the dimension instance to the set which holds the dimension
+   * instances of the complex filter expressions.
+   *
+   * @param expression       expression to inspect recursively
+   * @param filterDimensions set collecting the dimension instances
+   */
+  private static void addColumnDimensions(Expression expression,
+      Set<CarbonDimension> filterDimensions) {
+    if (null == expression) {
+      return;
+    }
+    if (expression instanceof ColumnExpression
+        && ((ColumnExpression) expression).isDimension()) {
+      filterDimensions.add(((ColumnExpression) expression).getDimension());
+      return;
+    }
+    for (Expression child : expression.getChildren()) {
+      addColumnDimensions(child, filterDimensions);
+    }
+  }
+}

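To make the byte-masking scheme in QueryUtil.getMaskedKey above concrete, here is a minimal standalone sketch (illustrative class and variable names, not part of this commit): a value of -1 in the mask-byte-range array marks a position to skip, and every selected mdkey byte is AND-ed with the corresponding max-key byte.

import java.util.Arrays;

public final class MaskedKeyDemo {

  // same logic as QueryUtil.getMaskedKey: copy only the selected mdkey bytes,
  // AND-ing each one with the max key so that unused bits are zeroed out
  static byte[] maskedKey(byte[] data, byte[] maxKey, int[] maskByteRanges, int byteCount) {
    byte[] masked = new byte[byteCount];
    int counter = 0;
    for (int i = 0; i < byteCount; i++) {
      int byteRange = maskByteRanges[i];
      if (byteRange != -1) {
        masked[counter++] = (byte) (data[byteRange] & maxKey[byteRange]);
      }
    }
    return masked;
  }

  public static void main(String[] args) {
    byte[] mdKey = { 0x12, 0x34, 0x56, 0x78 };                        // full mdkey
    byte[] maxKey = { (byte) 0xFF, (byte) 0xFF, 0x0F, (byte) 0xFF };  // per-byte mask
    int[] ranges = { 2, 3 };                                          // keep only bytes 2 and 3
    // prints [6, 120]: 0x56 & 0x0F = 0x06 and 0x78 & 0xFF = 0x78
    System.out.println(Arrays.toString(maskedKey(mdKey, maxKey, ranges, ranges.length)));
  }
}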
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/executor/util/RestructureUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/executor/util/RestructureUtil.java b/core/src/main/java/org/apache/carbondata/scan/executor/util/RestructureUtil.java
new file mode 100644
index 0000000..a20fd53
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/executor/util/RestructureUtil.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.executor.util;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.scan.executor.infos.AggregatorInfo;
+import org.apache.carbondata.scan.model.QueryDimension;
+import org.apache.carbondata.scan.model.QueryMeasure;
+
+/**
+ * Utility class for restructuring
+ */
+public class RestructureUtil {
+
+  /**
+   * Below method will be used to get the updated query dimensions. After
+   * restructuring, some dimensions may not be present in older table blocks;
+   * in that case we need to select only those query dimensions which are
+   * present in the current table block.
+   *
+   * @param queryDimensions
+   * @param tableBlockDimensions
+   * @return list of query dimension which is present in the table block
+   */
+  public static List<QueryDimension> getUpdatedQueryDimension(List<QueryDimension> queryDimensions,
+      List<CarbonDimension> tableBlockDimensions, List<CarbonDimension> tableComplexDimension) {
+    List<QueryDimension> presentDimension =
+        new ArrayList<QueryDimension>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    // selecting only those dimensions which are present in the table block
+    for (QueryDimension queryDimimension : queryDimensions) {
+      for (CarbonDimension tableDimension : tableBlockDimensions) {
+        if (tableDimension.equals(queryDimimension.getDimension())) {
+          presentDimension.add(queryDimimension);
+        }
+      }
+    }
+    for (QueryDimension queryDimimension : queryDimensions) {
+      for (CarbonDimension tableDimension : tableComplexDimension) {
+        if (tableDimension.equals(queryDimimension.getDimension())) {
+          presentDimension.add(queryDimimension);
+        }
+      }
+    }
+    return presentDimension;
+  }
+
+  /**
+   * Below method is to add the dimension children for a complex type
+   * dimension, as internally a dimension column is created for each part of a
+   * complex dimension. When a complex dimension is requested in the query we
+   * need to add its children, which are hidden from the user. For example, if
+   * a complex dimension is an Array of String[2], three dimensions are
+   * stored; when the user queries the complex (array) type we add its
+   * children, read the respective blocks and create a tuple based on all
+   * three dimensions.
+   *
+   * @param queryDimensions      current query dimensions
+   * @param tableBlockDimensions dimensions which is present in the table block
+   * @return updated dimension(after adding complex type children)
+   */
+  public static List<CarbonDimension> addChildrenForComplexTypeDimension(
+      List<CarbonDimension> queryDimensions, List<CarbonDimension> tableBlockDimensions) {
+    List<CarbonDimension> updatedQueryDimension = new ArrayList<CarbonDimension>();
+    int numberOfChildren = 0;
+    for (CarbonDimension queryDimension : queryDimensions) {
+      // if the number of children is zero, then it is not a complex dimension,
+      // so directly add it to the query dimensions
+      if (queryDimension.numberOfChild() == 0) {
+        updatedQueryDimension.add(queryDimension);
+      }
+      // if the number of children is more than zero, add all its children
+      numberOfChildren = queryDimension.getOrdinal() + queryDimension.numberOfChild();
+      for (int j = queryDimension.getOrdinal(); j < numberOfChildren; j++) {
+        updatedQueryDimension.add(tableBlockDimensions.get(j));
+      }
+    }
+    return updatedQueryDimension;
+  }
+
+  /**
+   * Below method will be used to get the aggregator info object
+   * in this method some of the properties which will be extracted
+   * from query measure and current block measures will be set
+   *
+   * @param queryMeasures        measures present in query
+   * @param currentBlockMeasures current block measures
+   * @return aggregator info
+   */
+  public static AggregatorInfo getAggregatorInfos(List<QueryMeasure> queryMeasures,
+      List<CarbonMeasure> currentBlockMeasures) {
+    AggregatorInfo aggregatorInfos = new AggregatorInfo();
+    int numberOfMeasureInQuery = queryMeasures.size();
+    int[] measureOrdinals = new int[numberOfMeasureInQuery];
+    Object[] defaultValues = new Object[numberOfMeasureInQuery];
+    boolean[] measureExistsInCurrentBlock = new boolean[numberOfMeasureInQuery];
+    int index = 0;
+    for (QueryMeasure queryMeasure : queryMeasures) {
+      measureOrdinals[index] = queryMeasure.getMeasure().getOrdinal();
+      // if the query measure exists in the current block measures
+      // then set measureExists to true
+      // otherwise adding a default value of a measure
+      if (currentBlockMeasures.contains(queryMeasure.getMeasure())) {
+        measureExistsInCurrentBlock[index] = true;
+      } else {
+        defaultValues[index] = queryMeasure.getMeasure().getDefaultValue();
+      }
+      index++;
+    }
+    aggregatorInfos.setDefaultValues(defaultValues);
+    aggregatorInfos.setMeasureOrdinals(measureOrdinals);
+    aggregatorInfos.setMeasureExists(measureExistsInCurrentBlock);
+    return aggregatorInfos;
+  }
+}

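The restructuring contract in getAggregatorInfos is easiest to see from the consumer side. The following hedged sketch (hypothetical names, not part of this commit) shows how the measureExists and defaultValues arrays are typically used when scanning an older block: a measure that exists in the block is read from it, while a measure added after the block was written falls back to its default value.

import java.util.Arrays;

public final class RestructureDemo {

  // measureExists and defaultValues mirror the arrays set on AggregatorInfo;
  // valuesFromBlock stands in for the measure values actually stored in an old block
  static Object[] fillRow(boolean[] measureExists, Object[] defaultValues,
      Object[] valuesFromBlock) {
    Object[] row = new Object[measureExists.length];
    int blockIndex = 0;
    for (int i = 0; i < measureExists.length; i++) {
      row[i] = measureExists[i] ? valuesFromBlock[blockIndex++] : defaultValues[i];
    }
    return row;
  }

  public static void main(String[] args) {
    boolean[] exists = { true, false };   // second measure was added after this block was written
    Object[] defaults = { null, 0.0 };    // default value for the missing measure
    Object[] fromBlock = { 42.0 };        // only the first measure is present in the old block
    System.out.println(Arrays.toString(fillRow(exists, defaults, fromBlock)));  // [42.0, 0.0]
  }
}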
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/BinaryExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/BinaryExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/BinaryExpression.java
new file mode 100644
index 0000000..fbaaa81
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/BinaryExpression.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression;
+
+public abstract class BinaryExpression extends Expression {
+
+  private static final long serialVersionUID = 1L;
+
+  protected Expression left;
+  protected Expression right;
+  protected boolean isRangeExpression;
+
+  public BinaryExpression(Expression left, Expression right) {
+    this.left = left;
+    this.right = right;
+    children.add(left);
+    children.add(right);
+  }
+
+  public Expression getLeft() {
+    return left;
+  }
+
+  public Expression getRight() {
+    return right;
+  }
+
+  public boolean isRangeExpression() {
+    return isRangeExpression;
+  }
+
+  public void setRangeExpression(boolean isRangeExpression) {
+    this.isRangeExpression = isRangeExpression;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/ColumnExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/ColumnExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/ColumnExpression.java
new file mode 100644
index 0000000..433e53a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/ColumnExpression.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonColumn;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class ColumnExpression extends LeafExpression {
+
+  private static final long serialVersionUID = 1L;
+
+  private String columnName;
+
+  private boolean isDimension;
+
+  private int colIndex = -1;
+
+  private DataType dataType;
+
+  private CarbonDimension dimension;
+
+  private CarbonColumn carbonColumn;
+
+  public ColumnExpression(String columnName, DataType dataType) {
+    this.columnName = columnName;
+    this.dataType = dataType;
+
+  }
+
+  public CarbonDimension getDimension() {
+    return dimension;
+  }
+
+  public void setDimension(CarbonDimension dimension) {
+    this.dimension = dimension;
+  }
+
+  public String getColumnName() {
+    return columnName;
+  }
+
+  public void setColumnName(String columnName) {
+    this.columnName = columnName;
+  }
+
+  public boolean isDimension() {
+    return isDimension;
+  }
+
+  public void setDimension(boolean isDimension) {
+    this.isDimension = isDimension;
+  }
+
+  public int getColIndex() {
+    return colIndex;
+  }
+
+  public void setColIndex(int colIndex) {
+    this.colIndex = colIndex;
+  }
+
+  public DataType getDataType() {
+    return dataType;
+  }
+
+  public void setDataType(DataType dataType) {
+    this.dataType = dataType;
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value) {
+    ExpressionResult expressionResult =
+        new ExpressionResult(dataType, (null == value ? null : value.getVal(colIndex)));
+    return expressionResult;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
+  @Override public String getString() {
+    // TODO Auto-generated method stub
+    return "ColumnExpression(" + columnName + ')';
+  }
+
+  public CarbonColumn getCarbonColumn() {
+    return carbonColumn;
+  }
+
+  public void setCarbonColumn(CarbonColumn carbonColumn) {
+    this.carbonColumn = carbonColumn;
+  }
+
+}

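The two classes above compose into filter trees. Below is a brief sketch of the wiring; it assumes the EqualToExpression and LiteralExpression subclasses and the renamed RowImpl from elsewhere in this commit, so treat it as an illustration of the evaluate(RowIntf) contract rather than a verified snippet.

import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
import org.apache.carbondata.scan.expression.ColumnExpression;
import org.apache.carbondata.scan.expression.LiteralExpression;
import org.apache.carbondata.scan.expression.conditional.EqualToExpression;
import org.apache.carbondata.scan.filter.intf.RowImpl;

public final class ExpressionTreeDemo {
  public static void main(String[] args) throws Exception {
    // name = 'carbon': the column expression evaluates to the value at its
    // colIndex in the row, the literal expression to a constant
    ColumnExpression nameColumn = new ColumnExpression("name", DataType.STRING);
    nameColumn.setColIndex(0);
    EqualToExpression equals =
        new EqualToExpression(nameColumn, new LiteralExpression("carbon", DataType.STRING));

    RowImpl row = new RowImpl();
    row.setValues(new Object[] { "carbon" });
    // evaluate walks the tree bottom-up; getBoolean() yields the filter verdict
    System.out.println(equals.evaluate(row).getBoolean());  // true
  }
}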


[03/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/intf/ExpressionType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/intf/ExpressionType.java b/core/src/main/java/org/carbondata/scan/filter/intf/ExpressionType.java
deleted file mode 100644
index 2459b5a..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/intf/ExpressionType.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.filter.intf;
-
-public enum ExpressionType {
-
-  AND,
-  OR,
-  NOT,
-  EQUALS,
-  NOT_EQUALS,
-  LESSTHAN,
-  LESSTHAN_EQUALTO,
-  GREATERTHAN,
-  GREATERTHAN_EQUALTO,
-  ADD,
-  SUBSTRACT,
-  DIVIDE,
-  MULTIPLY,
-  IN,
-  LIST,
-  NOT_IN,
-  UNKNOWN,
-  LITERAL,
-  RANGE
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/intf/FilterExecuterType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/intf/FilterExecuterType.java b/core/src/main/java/org/carbondata/scan/filter/intf/FilterExecuterType.java
deleted file mode 100644
index 2de575e..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/intf/FilterExecuterType.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.intf;
-
-import java.io.Serializable;
-
-public enum FilterExecuterType implements Serializable {
-
-  INCLUDE, EXCLUDE, OR, AND, RESTRUCTURE, ROWLEVEL, RANGE, ROWLEVEL_GREATERTHAN,
-  ROWLEVEL_GREATERTHAN_EQUALTO, ROWLEVEL_LESSTHAN_EQUALTO, ROWLEVEL_LESSTHAN
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/intf/RowImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/intf/RowImpl.java b/core/src/main/java/org/carbondata/scan/filter/intf/RowImpl.java
deleted file mode 100644
index 04e1a3d..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/intf/RowImpl.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.filter.intf;
-
-public class RowImpl implements RowIntf {
-  private Object[] row;
-
-  public RowImpl() {
-    row = new Object[0];
-  }
-
-  @Override public Object getVal(int index) {
-    return row[index];
-  }
-
-  @Override public Object[] getValues() {
-    return row;
-  }
-
-  @Override public void setValues(final Object[] row) {
-    this.row = row;
-  }
-
-  @Override public int size() {
-    return this.row.length;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/intf/RowIntf.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/intf/RowIntf.java b/core/src/main/java/org/carbondata/scan/filter/intf/RowIntf.java
deleted file mode 100644
index ddfa1eb..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/intf/RowIntf.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.filter.intf;
-
-public interface RowIntf {
-  Object getVal(int index);
-
-  Object[] getValues();
-
-  void setValues(Object[] setValues);
-
-  int size();
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/AndFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/AndFilterResolverImpl.java b/core/src/main/java/org/carbondata/scan/filter/resolver/AndFilterResolverImpl.java
deleted file mode 100644
index 3485bb8..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/AndFilterResolverImpl.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.resolver;
-
-import java.util.List;
-import java.util.SortedMap;
-
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.expression.BinaryExpression;
-import org.carbondata.scan.filter.intf.ExpressionType;
-
-public class AndFilterResolverImpl extends LogicalFilterResolverImpl {
-
-  /**
-   *i
-   */
-  private static final long serialVersionUID = -761688076874662001L;
-
-  public AndFilterResolverImpl(FilterResolverIntf leftEvalutor, FilterResolverIntf rightEvalutor,
-      ExpressionType filterExpressionType,BinaryExpression expression) {
-    super(leftEvalutor, rightEvalutor, expression);
-  }
-
-  @Override public void getStartKey(long[] startKeys,
-      SortedMap<Integer, byte[]> noDicStartKeys, List<long[]> startKeyList)
-      throws QueryExecutionException {
-    leftEvalutor.getStartKey(startKeys, noDicStartKeys, startKeyList);
-    rightEvalutor.getStartKey(startKeys, noDicStartKeys, startKeyList);
-  }
-
-  @Override public void getEndKey(SegmentProperties segmentProperties,
-      AbsoluteTableIdentifier tableIdentifier, long[] endKeys,
-      SortedMap<Integer, byte[]> noDicEndKeys, List<long[]> endKeyList)
-      throws QueryExecutionException {
-    leftEvalutor.getEndKey(segmentProperties, tableIdentifier, endKeys, noDicEndKeys, endKeyList);
-    rightEvalutor.getEndKey(segmentProperties, tableIdentifier, endKeys, noDicEndKeys, endKeyList);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/ConditionalFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/ConditionalFilterResolverImpl.java b/core/src/main/java/org/carbondata/scan/filter/resolver/ConditionalFilterResolverImpl.java
deleted file mode 100644
index e943ff1..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/ConditionalFilterResolverImpl.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.resolver;
-
-import java.util.List;
-import java.util.SortedMap;
-
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.expression.ColumnExpression;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.conditional.BinaryConditionalExpression;
-import org.carbondata.scan.expression.conditional.ConditionalExpression;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.FilterUtil;
-import org.carbondata.scan.filter.intf.FilterExecuterType;
-import org.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-import org.carbondata.scan.filter.resolver.resolverinfo.visitor.FilterInfoTypeVisitorFactory;
-
-public class ConditionalFilterResolverImpl implements FilterResolverIntf {
-
-  private static final long serialVersionUID = 1838955268462201691L;
-  protected Expression exp;
-  protected boolean isExpressionResolve;
-  protected boolean isIncludeFilter;
-  private DimColumnResolvedFilterInfo dimColResolvedFilterInfo;
-
-  public ConditionalFilterResolverImpl(Expression exp, boolean isExpressionResolve,
-      boolean isIncludeFilter) {
-    this.exp = exp;
-    this.isExpressionResolve = isExpressionResolve;
-    this.isIncludeFilter = isIncludeFilter;
-    this.dimColResolvedFilterInfo = new DimColumnResolvedFilterInfo();
-  }
-
-  /**
-   * This API will resolve the filter expression and generates the
-   * dictionaries for executing/evaluating the filter expressions in the
-   * executer layer.
-   *
-   * @throws QueryExecutionException
-   * @throws FilterUnsupportedException
-   */
-  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier)
-      throws FilterUnsupportedException {
-    FilterResolverMetadata metadata = new FilterResolverMetadata();
-    metadata.setTableIdentifier(absoluteTableIdentifier);
-    if ((!isExpressionResolve) && exp instanceof BinaryConditionalExpression) {
-      BinaryConditionalExpression binaryConditionalExpression = (BinaryConditionalExpression) exp;
-      Expression leftExp = binaryConditionalExpression.getLeft();
-      Expression rightExp = binaryConditionalExpression.getRight();
-      if (leftExp instanceof ColumnExpression) {
-        ColumnExpression columnExpression = (ColumnExpression) leftExp;
-        metadata.setColumnExpression(columnExpression);
-        metadata.setExpression(rightExp);
-        metadata.setIncludeFilter(isIncludeFilter);
-        // If imei=imei comes in filter condition then we need to
-        // skip processing of right expression.
-        // This flow has reached here assuming that this is a single
-        // column expression.
-        // we need to check if the other expression contains column
-        // expression or not in depth.
-        CarbonDimension dimension = columnExpression.getDimension();
-        if (FilterUtil.checkIfExpressionContainsColumn(rightExp)
-            || FilterUtil.isExpressionNeedsToResolved(rightExp, isIncludeFilter) &&
-            dimension.hasEncoding(Encoding.DICTIONARY) && !dimension
-            .hasEncoding(Encoding.DIRECT_DICTIONARY)) {
-          isExpressionResolve = true;
-        } else {
-          // The visitor pattern is used in this scenario in order to populate the
-          // dimColResolvedFilterInfo visitable object with filter member values
-          // based on the visitor type. Currently there are 3 types of visitors:
-          // custom, direct and no dictionary. Each visitor populates the visitable
-          // instance as per its own business logic, which differs across visitors.
-          dimColResolvedFilterInfo.populateFilterInfoBasedOnColumnType(
-              FilterInfoTypeVisitorFactory.getResolvedFilterInfoVisitor(columnExpression),
-              metadata);
-        }
-      } else if (rightExp instanceof ColumnExpression) {
-        ColumnExpression columnExpression = (ColumnExpression) rightExp;
-        metadata.setColumnExpression(columnExpression);
-        metadata.setExpression(leftExp);
-        metadata.setIncludeFilter(isIncludeFilter);
-        if (columnExpression.getDataType().equals(DataType.TIMESTAMP)) {
-          isExpressionResolve = true;
-        } else {
-          // If imei=imei comes in the filter condition then we need to
-          // skip processing of the left expression.
-          // This flow has reached here assuming that this is a single
-          // column expression. We need to check in depth whether the
-          // other expression contains a column expression or not.
-          if (FilterUtil.checkIfExpressionContainsColumn(leftExp)) {
-            isExpressionResolve = true;
-          } else {
-
-            dimColResolvedFilterInfo.populateFilterInfoBasedOnColumnType(
-                FilterInfoTypeVisitorFactory.getResolvedFilterInfoVisitor(columnExpression),
-                metadata);
-
-          }
-        }
-      } else {
-        isExpressionResolve = true;
-      }
-    }
-    if (isExpressionResolve && exp instanceof ConditionalExpression) {
-      ConditionalExpression conditionalExpression = (ConditionalExpression) exp;
-      List<ColumnExpression> columnList = conditionalExpression.getColumnList();
-      metadata.setColumnExpression(columnList.get(0));
-      metadata.setExpression(exp);
-      metadata.setIncludeFilter(isIncludeFilter);
-      if (!columnList.get(0).getDimension().hasEncoding(Encoding.DICTIONARY)) {
-        dimColResolvedFilterInfo.populateFilterInfoBasedOnColumnType(
-            FilterInfoTypeVisitorFactory.getResolvedFilterInfoVisitor(columnList.get(0)), metadata);
-
-      } else if (columnList.get(0).getDimension().hasEncoding(Encoding.DICTIONARY) && !(
-          columnList.get(0).getDimension().getDataType() == DataType.STRUCT
-              || columnList.get(0).getDimension().getDataType() == DataType.ARRAY)) {
-        dimColResolvedFilterInfo.setFilterValues(FilterUtil
-            .getFilterListForAllValues(absoluteTableIdentifier, exp, columnList.get(0),
-                isIncludeFilter));
-
-        dimColResolvedFilterInfo.setColumnIndex(columnList.get(0).getDimension().getOrdinal());
-        dimColResolvedFilterInfo.setDimension(columnList.get(0).getDimension());
-      }
-    }
-
-  }
-
-  /**
-   * Left node will not be present in this scenario
-   *
-   * @return left node of type FilterResolverIntf instance
-   */
-  public FilterResolverIntf getLeft() {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  /**
-   * Right node will not be present in this scenario
-   *
-   * @return right node of type FilterResolverIntf instance
-   */
-  @Override public FilterResolverIntf getRight() {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  /**
-   * Method will return the DimColumnResolvedFilterInfo instance which consists of
-   * the mapping of the respective dimension and its surrogates involved in
-   * filter expression.
-   *
-   * @return DimColumnResolvedFilterInfo
-   */
-  public DimColumnResolvedFilterInfo getDimColResolvedFilterInfo() {
-    return dimColResolvedFilterInfo;
-  }
-
-  /**
-   * method will calculate the start key based on the filter surrogates
-   */
-  public void getStartKey(long[] startKey,
-      SortedMap<Integer, byte[]> setOfStartKeyByteArray, List<long[]> startKeyList)
-      throws QueryExecutionException {
-    if (null == dimColResolvedFilterInfo.getStarIndexKey()) {
-      FilterUtil.getStartKey(dimColResolvedFilterInfo.getDimensionResolvedFilterInstance(),
-          startKey, startKeyList);
-      FilterUtil.getStartKeyForNoDictionaryDimension(dimColResolvedFilterInfo,
-          setOfStartKeyByteArray);
-    }
-  }
-
-  /**
-   * method will get the end key based on the filter surrogates
-   *
-   * @throws QueryExecutionException
-   */
-  @Override public void getEndKey(SegmentProperties segmentProperties,
-      AbsoluteTableIdentifier absoluteTableIdentifier, long[] endKeys,
-      SortedMap<Integer, byte[]> setOfEndKeyByteArray, List<long[]> endKeyList)
-      throws QueryExecutionException {
-    if (null == dimColResolvedFilterInfo.getEndIndexKey()) {
-      FilterUtil.getEndKey(dimColResolvedFilterInfo.getDimensionResolvedFilterInstance(),
-          absoluteTableIdentifier, endKeys, segmentProperties, endKeyList);
-      FilterUtil.getEndKeyForNoDictionaryDimension(dimColResolvedFilterInfo,
-          setOfEndKeyByteArray);
-    }
-  }
-
-  /**
-   * Method will return the executer type for a particular conditional resolver;
-   * basically two types of executers will be formed for the conditional query.
-   *
-   * @return the filter executer type
-   */
-  @Override public FilterExecuterType getFilterExecuterType() {
-    switch (exp.getFilterExpressionType()) {
-      case NOT_EQUALS:
-      case NOT_IN:
-        return FilterExecuterType.EXCLUDE;
-
-      default:
-        return FilterExecuterType.INCLUDE;
-    }
-
-  }
-
-  @Override public Expression getFilterExpression() {
-    // TODO Auto-generated method stub
-    return exp;
-  }
-
-}
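
For context on getFilterExecuterType above: the include/exclude decision depends only on the expression type. A minimal standalone sketch of that mapping, using hypothetical enum names rather than the CarbonData classes:

enum ExprType { EQUALS, IN, NOT_EQUALS, NOT_IN }
enum ExecType { INCLUDE, EXCLUDE }

class ExecuterTypeMappingSketch {
  // Negated predicates become EXCLUDE executers; everything else defaults to
  // INCLUDE, mirroring the switch in getFilterExecuterType().
  static ExecType of(ExprType t) {
    switch (t) {
      case NOT_EQUALS:
      case NOT_IN:
        return ExecType.EXCLUDE;
      default:
        return ExecType.INCLUDE;
    }
  }

  public static void main(String[] args) {
    System.out.println(of(ExprType.NOT_IN)); // EXCLUDE
    System.out.println(of(ExprType.EQUALS)); // INCLUDE
  }
}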

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/FilterResolverIntf.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/FilterResolverIntf.java b/core/src/main/java/org/carbondata/scan/filter/resolver/FilterResolverIntf.java
deleted file mode 100644
index e8bb24d..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/FilterResolverIntf.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.resolver;
-
-import java.io.Serializable;
-import java.util.List;
-import java.util.SortedMap;
-
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.FilterExecuterType;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-
-public interface FilterResolverIntf extends Serializable {
-
-  /**
-   * This API will resolve the filter expression and generate the
-   * dictionaries for executing/evaluating the filter expressions in the
-   * executer layer.
-   *
-   * @throws QueryExecutionException
-   * @throws FilterUnsupportedException
-   */
-  void resolve(AbsoluteTableIdentifier absoluteTableIdentifier) throws FilterUnsupportedException;
-
-  /**
-   * This API will provide the left column filter expression
-   * in order to resolve the left expression filter.
-   *
-   * @return FilterResolverIntf
-   */
-  FilterResolverIntf getLeft();
-
-  /**
-   * API will provide the right column filter expression in order to resolve
-   * the right expression filter.
-   *
-   * @return FilterResolverIntf
-   */
-  FilterResolverIntf getRight();
-
-  /**
-   * API will return the resolved filter instance; this instance will provide
-   * the resolved surrogates based on the applied filter
-   *
-   * @return DimColumnResolvedFilterInfo object
-   */
-  DimColumnResolvedFilterInfo getDimColResolvedFilterInfo();
-
-  /**
-   * API will get the start key based on the filter applied based on the key generator
-   *
-   * @param segmentProperties
-   * @param startKey
-   * @param setOfStartKeyByteArray
-   */
-  void getStartKey(long[] startKey, SortedMap<Integer, byte[]> setOfStartKeyByteArray,
-      List<long[]> startKeyList) throws QueryExecutionException;
-
-  /**
-   * API will read the end key based on the max surrogate of a
-   * particular dimension column
-   *
-   * @param setOfEndKeyByteArray
-   * @param endKeys
-   * @throws QueryExecutionException
-   */
-  void getEndKey(SegmentProperties segmentProperties, AbsoluteTableIdentifier tableIdentifier,
-      long[] endKeys, SortedMap<Integer, byte[]> setOfEndKeyByteArray, List<long[]> endKeyList)
-      throws QueryExecutionException;
-
-  /**
-   * API will return the filter executer type which will be used to evaluate
-   * the resolved filter while query execution
-   *
-   * @return FilterExecuterType.
-   */
-  FilterExecuterType getFilterExecuterType();
-
-  Expression getFilterExpression();
-
-}
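
FilterResolverIntf above effectively describes a binary resolver tree: logical resolvers expose children through getLeft()/getRight(), while leaf (conditional) resolvers return null. A standalone sketch of walking such a tree, assuming a simplified stand-in interface rather than the real one:

import java.util.ArrayList;
import java.util.List;

class ResolverTreeSketch {
  // Simplified stand-in for FilterResolverIntf: leaves have null children.
  interface Node {
    Node left();
    Node right();
    String executerType();
  }

  // In-order walk; an executer builder would recurse the same way, creating
  // AND/OR executers for inner nodes and include/exclude executers for leaves.
  static void collect(Node n, List<String> out) {
    if (n == null) {
      return;
    }
    collect(n.left(), out);
    out.add(n.executerType());
    collect(n.right(), out);
  }

  static Node leaf(final String type) {
    return new Node() {
      public Node left() { return null; }
      public Node right() { return null; }
      public String executerType() { return type; }
    };
  }

  static Node and(final Node l, final Node r) {
    return new Node() {
      public Node left() { return l; }
      public Node right() { return r; }
      public String executerType() { return "AND"; }
    };
  }

  public static void main(String[] args) {
    List<String> out = new ArrayList<String>();
    collect(and(leaf("INCLUDE"), leaf("EXCLUDE")), out);
    System.out.println(out); // [INCLUDE, AND, EXCLUDE]
  }
}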

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/LogicalFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/LogicalFilterResolverImpl.java b/core/src/main/java/org/carbondata/scan/filter/resolver/LogicalFilterResolverImpl.java
deleted file mode 100644
index 31c4be6..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/LogicalFilterResolverImpl.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.filter.resolver;
-
-import java.util.List;
-import java.util.SortedMap;
-
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.expression.BinaryExpression;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.FilterExecuterType;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-
-public class LogicalFilterResolverImpl implements FilterResolverIntf {
-  /**
-   *
-   */
-  private static final long serialVersionUID = 5734382980564402914L;
-
-  protected FilterResolverIntf leftEvalutor;
-
-  protected FilterResolverIntf rightEvalutor;
-
-  protected ExpressionType filterExpressionType;
-
-  private BinaryExpression filterExpression;
-
-  public LogicalFilterResolverImpl(FilterResolverIntf leftEvalutor,
-      FilterResolverIntf rightEvalutor, BinaryExpression currentExpression) {
-    this.leftEvalutor = leftEvalutor;
-    this.rightEvalutor = rightEvalutor;
-    this.filterExpressionType = currentExpression.getFilterExpressionType();
-    this.filterExpression = currentExpression;
-  }
-
-  /**
-   * Logical filter resolver will return the left and right filter expression
-   * node for filter evaluation, so in this instance no implementation is required.
-   *
-   * @param absoluteTableIdentifier
-   */
-  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier) {
-
-  }
-
-  /**
-   * Since it is a binary conditional expression, the getLeft method will get the
-   * left node of the filter expression
-   *
-   * @return FilterResolverIntf.
-   */
-  public FilterResolverIntf getLeft() {
-    return leftEvalutor;
-  }
-
-  /**
-   * Since it is a binary conditional expression, the getRight method will get the
-   * right node of the filter expression
-   *
-   * @return FilterResolverIntf.
-   */
-  public FilterResolverIntf getRight() {
-    return rightEvalutor;
-  }
-
-  @Override public DimColumnResolvedFilterInfo getDimColResolvedFilterInfo() {
-    return null;
-  }
-
-  @Override public void getStartKey(long[] startKey,
-      SortedMap<Integer, byte[]> setOfStartKeyByteArray, List<long[]> startKeyList)
-      throws QueryExecutionException {
-
-  }
-
-  @Override public void getEndKey(SegmentProperties segmentProperties,
-      AbsoluteTableIdentifier tableIdentifier, long[] endKeys,
-      SortedMap<Integer, byte[]> setOfEndKeyByteArray, List<long[]> endKeyList)
-      throws QueryExecutionException {
-
-  }
-
-  @Override public FilterExecuterType getFilterExecuterType() {
-    switch (filterExpressionType) {
-      case OR:
-        return FilterExecuterType.OR;
-      case AND:
-        return FilterExecuterType.AND;
-
-      default:
-        return null;
-    }
-  }
-
-  @Override public Expression getFilterExpression() {
-    return filterExpression;
-  }
-}
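
For context on the OR/AND executer types returned above: they ultimately combine the row sets matched by the two child executers. A standalone illustration of those semantics using java.util.BitSet, which is an assumption made for clarity here; the real executers operate on CarbonData's own bitmap structures:

import java.util.BitSet;

class LogicalCombineSketch {
  // AND intersects the matched-row bitmaps of the children; OR unions them.
  static BitSet and(BitSet left, BitSet right) {
    BitSet r = (BitSet) left.clone();
    r.and(right);
    return r;
  }

  static BitSet or(BitSet left, BitSet right) {
    BitSet r = (BitSet) left.clone();
    r.or(right);
    return r;
  }

  public static void main(String[] args) {
    BitSet a = new BitSet();
    a.set(0);
    a.set(2);
    BitSet b = new BitSet();
    b.set(2);
    b.set(3);
    System.out.println(and(a, b)); // {2}
    System.out.println(or(a, b));  // {0, 2, 3}
  }
}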

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/RestructureFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/RestructureFilterResolverImpl.java b/core/src/main/java/org/carbondata/scan/filter/resolver/RestructureFilterResolverImpl.java
deleted file mode 100644
index 6335377..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/RestructureFilterResolverImpl.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.filter.resolver;
-
-import java.util.List;
-import java.util.SortedMap;
-
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.scan.expression.ColumnExpression;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.conditional.BinaryConditionalExpression;
-import org.carbondata.scan.expression.conditional.ConditionalExpression;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.FilterUtil;
-import org.carbondata.scan.filter.intf.FilterExecuterType;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-
-public class RestructureFilterResolverImpl implements FilterResolverIntf {
-  /**
-   *
-   */
-  private static final long serialVersionUID = -5399656036192814524L;
-
-  protected DimColumnResolvedFilterInfo dimColumnResolvedFilterInfo;
-
-  private Expression exp;
-
-  private String defaultValue;
-
-  private int surrogate;
-
-  private boolean isExpressionResolve;
-
-  private boolean isIncludeFilter;
-
-  public RestructureFilterResolverImpl(Expression exp, String defaultValue, int surrogate,
-      boolean isExpressionResolve, boolean isIncludeFilter) {
-    dimColumnResolvedFilterInfo = new DimColumnResolvedFilterInfo();
-    this.exp = exp;
-    this.defaultValue = defaultValue;
-    this.surrogate = surrogate;
-    this.isExpressionResolve = isExpressionResolve;
-    this.isIncludeFilter = isIncludeFilter;
-  }
-
-  /**
-   * Method will resolve the filters and replace the newly added dimension with its
-   * default value
-   *
-   * @param absoluteTableIdentifier
-   * @throws FilterUnsupportedException
-   */
-  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier)
-      throws FilterUnsupportedException {
-
-    DimColumnResolvedFilterInfo dimColumnResolvedFilterInfo = new DimColumnResolvedFilterInfo();
-    if (!this.isExpressionResolve && exp instanceof BinaryConditionalExpression) {
-      BinaryConditionalExpression binaryConditionalExpression = (BinaryConditionalExpression) exp;
-      Expression left = binaryConditionalExpression.getLeft();
-      Expression right = binaryConditionalExpression.getRight();
-      if (left instanceof ColumnExpression) {
-        ColumnExpression columnExpression = (ColumnExpression) left;
-        if (columnExpression.getDataType().equals(DataType.TIMESTAMP)) {
-          isExpressionResolve = true;
-        } else {
-          // If imei=imei comes in the filter condition then we need to
-          // skip processing of the right expression.
-          // This flow has reached here assuming that this is a single
-          // column expression. We need to check in depth whether the
-          // other expression contains a column expression or not.
-          if (FilterUtil.checkIfExpressionContainsColumn(right)) {
-            isExpressionResolve = true;
-          } else {
-            dimColumnResolvedFilterInfo
-                .setColumnIndex(columnExpression.getCarbonColumn().getOrdinal());
-            dimColumnResolvedFilterInfo.setFilterValues(
-                FilterUtil.getFilterListForRS(right, columnExpression, defaultValue, surrogate));
-          }
-        }
-      } else if (right instanceof ColumnExpression) {
-        ColumnExpression columnExpression = (ColumnExpression) right;
-        if (columnExpression.getDataType().equals(DataType.TIMESTAMP)) {
-          isExpressionResolve = true;
-        } else {
-
-          // If imei=imei comes in the filter condition then we need to
-          // skip processing of the left expression.
-          // This flow has reached here assuming that this is a single
-          // column expression. We need to check in depth whether the
-          // other expression contains a column expression or not.
-          if (checkIfExpressionContainsColumn(left)) {
-            isExpressionResolve = true;
-          } else {
-            dimColumnResolvedFilterInfo
-                .setColumnIndex(columnExpression.getCarbonColumn().getOrdinal());
-            dimColumnResolvedFilterInfo.setFilterValues(
-                FilterUtil.getFilterListForRS(left, columnExpression, defaultValue, surrogate));
-          }
-        }
-      }
-    }
-    if (this.isExpressionResolve && exp instanceof ConditionalExpression) {
-      ConditionalExpression conditionalExpression = (ConditionalExpression) exp;
-      List<ColumnExpression> columnList = conditionalExpression.getColumnList();
-      dimColumnResolvedFilterInfo.setColumnIndex(columnList.get(0).getDimension().getOrdinal());
-      dimColumnResolvedFilterInfo.setFilterValues(FilterUtil
-          .getFilterListForAllMembersRS(exp, columnList.get(0), defaultValue, surrogate,
-              isIncludeFilter));
-    }
-
-  }
-
-  /**
-   * This method will check if a given expression contains a column expression recursively.
-   *
-   * @return boolean
-   */
-  private boolean checkIfExpressionContainsColumn(Expression expression) {
-    if (expression instanceof ColumnExpression) {
-      return true;
-    }
-    for (Expression child : expression.getChildren()) {
-      if (checkIfExpressionContainsColumn(child)) {
-        return true;
-      }
-    }
-
-    return false;
-  }
-
-  @Override public FilterResolverIntf getLeft() {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override public FilterResolverIntf getRight() {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  /**
-   * Method will return the DimColumnResolvedFilterInfo instance which consists
-   * the mapping of the respective dimension and its surrogates involved in
-   * filter expression.
-   *
-   * @return DimColumnResolvedFilterInfo
-   */
-  public DimColumnResolvedFilterInfo getDimColResolvedFilterInfo() {
-    return dimColumnResolvedFilterInfo;
-  }
-
-  /**
-   * For restructure resolver no implementation is required for getting
-   * the start key since it already has default values
-   */
-  @Override public void getStartKey(long[] startKeys,
-      SortedMap<Integer, byte[]> noDicStartKeys, List<long[]> startKeyList) {
-
-  }
-
-  /**
-   * For restructure resolver no implementation is required for getting
-   * the end key since it already has default values
-   */
-  @Override public void getEndKey(SegmentProperties segmentProperties,
-      AbsoluteTableIdentifier tableIdentifier, long[] endKeys,
-      SortedMap<Integer, byte[]> noDicEndKeys, List<long[]> endKeyList) {
-  }
-
-  /**
-   * Method will get the executer type inorder to create filter executer tree
-   *
-   * @return FilterExecuterType
-   */
-  @Override public FilterExecuterType getFilterExecuterType() {
-    return FilterExecuterType.RESTRUCTURE;
-  }
-
-  @Override public Expression getFilterExpression() {
-    // TODO Auto-generated method stub
-    return exp;
-  }
-}
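
Context for RestructureFilterResolverImpl above: it targets segments written before a column existed, so every row is treated as carrying the column's default value. Under that rule the filter degenerates to a constant, as in this standalone sketch with hypothetical names, not the CarbonData evaluation path:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

class RestructureFilterSketch {
  // If the filtered column is absent from an old segment, every row holds the
  // default value, so the filter matches either all rows or none of them.
  static boolean matchesAllRows(Set<String> filterValues, String defaultValue,
      boolean isIncludeFilter) {
    boolean defaultMatches = filterValues.contains(defaultValue);
    return isIncludeFilter ? defaultMatches : !defaultMatches;
  }

  public static void main(String[] args) {
    Set<String> values = new HashSet<String>(Arrays.asList("NA"));
    System.out.println(matchesAllRows(values, "NA", true));  // true: include keeps all rows
    System.out.println(matchesAllRows(values, "NA", false)); // false: exclude drops all rows
  }
}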

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/RowLevelFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/RowLevelFilterResolverImpl.java b/core/src/main/java/org/carbondata/scan/filter/resolver/RowLevelFilterResolverImpl.java
deleted file mode 100644
index 7a26c12..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/RowLevelFilterResolverImpl.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.filter.resolver;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.scan.expression.ColumnExpression;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.conditional.ConditionalExpression;
-import org.carbondata.scan.filter.intf.FilterExecuterType;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-import org.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
-
-public class RowLevelFilterResolverImpl extends ConditionalFilterResolverImpl {
-
-  private static final long serialVersionUID = 176122729713729929L;
-  protected boolean isExpressionResolve;
-  protected boolean isIncludeFilter;
-
-  private List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList;
-  private List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList;
-  private AbsoluteTableIdentifier tableIdentifier;
-
-  public RowLevelFilterResolverImpl(Expression exp, boolean isExpressionResolve,
-      boolean isIncludeFilter, AbsoluteTableIdentifier tableIdentifier) {
-    super(exp, isExpressionResolve, isIncludeFilter);
-    dimColEvaluatorInfoList =
-        new ArrayList<DimColumnResolvedFilterInfo>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    msrColEvalutorInfoList = new ArrayList<MeasureColumnResolvedFilterInfo>(
-        CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    this.tableIdentifier = tableIdentifier;
-  }
-
-  /**
-   * Method which will resolve the filter expression by converting the filter member
-   * to its assigned dictionary values.
-   */
-  public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier) {
-    DimColumnResolvedFilterInfo dimColumnEvaluatorInfo = null;
-    MeasureColumnResolvedFilterInfo msrColumnEvalutorInfo = null;
-    int index = 0;
-    if (exp instanceof ConditionalExpression) {
-      ConditionalExpression conditionalExpression = (ConditionalExpression) exp;
-      List<ColumnExpression> columnList = conditionalExpression.getColumnList();
-      for (ColumnExpression columnExpression : columnList) {
-        if (columnExpression.isDimension()) {
-          dimColumnEvaluatorInfo = new DimColumnResolvedFilterInfo();
-          dimColumnEvaluatorInfo.setColumnIndex(columnExpression.getCarbonColumn().getOrdinal());
-          dimColumnEvaluatorInfo.setRowIndex(index++);
-          dimColumnEvaluatorInfo.setDimension(columnExpression.getDimension());
-          dimColumnEvaluatorInfo.setDimensionExistsInCurrentSilce(false);
-          dimColEvaluatorInfoList.add(dimColumnEvaluatorInfo);
-        } else {
-          msrColumnEvalutorInfo = new MeasureColumnResolvedFilterInfo();
-          msrColumnEvalutorInfo.setRowIndex(index++);
-          msrColumnEvalutorInfo.setAggregator(
-              ((CarbonMeasure) columnExpression.getCarbonColumn()).getAggregateFunction());
-          msrColumnEvalutorInfo
-              .setColumnIndex(((CarbonMeasure) columnExpression.getCarbonColumn()).getOrdinal());
-          msrColumnEvalutorInfo.setType(columnExpression.getCarbonColumn().getDataType());
-          msrColEvalutorInfoList.add(msrColumnEvalutorInfo);
-        }
-      }
-    }
-  }
-
-  /**
-   * This method will provide the executer type to the callee in order to identify
-   * the executer type for the filter resolution. Row level filter executer is a
-   * special executer since it gets all the rows of the specified filter dimension,
-   * which will be sent to Spark for processing.
-   */
-  @Override public FilterExecuterType getFilterExecuterType() {
-    return FilterExecuterType.ROWLEVEL;
-  }
-
-  /**
-   * Method will read the filter expression corresponding to the resolver.
-   * This method is required in the row level executer in order to evaluate the
-   * filter expression against Spark; as mentioned above, row level is a special
-   * type of filter resolver.
-   *
-   * @return Expression
-   */
-  public Expression getFilterExpresion() {
-    return exp;
-  }
-
-  /**
-   * Method will return the DimColumnResolvedFilterInfo instance which consists of
-   * the mapping of the respective dimension and its surrogates involved in
-   * filter expression.
-   *
-   * @return DimColumnResolvedFilterInfo
-   */
-  public List<DimColumnResolvedFilterInfo> getDimColEvaluatorInfoList() {
-    return dimColEvaluatorInfoList;
-  }
-
-  /**
-   * Method will return the MeasureColumnResolvedFilterInfo instance which contains
-   * measure level details.
-   *
-   * @return MeasureColumnResolvedFilterInfo
-   */
-  public List<MeasureColumnResolvedFilterInfo> getMsrColEvalutorInfoList() {
-    return msrColEvalutorInfoList;
-  }
-
-  /**
-   * Method will return table information which will be required for retrieving
-   * dictionary cache in order to read all the members of the respective dimension.
-   *
-   * @return AbsoluteTableIdentifier
-   */
-  public AbsoluteTableIdentifier getTableIdentifier() {
-    return tableIdentifier;
-  }
-
-}
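
The resolve() method above routes each column of the expression into either a dimension or a measure resolved-info list, tagging each with a running row index. A standalone sketch of that partitioning, with simplified types standing in for the real ColumnExpression:

import java.util.ArrayList;
import java.util.List;

class ColumnSplitSketch {
  static class Column {
    final String name;
    final boolean isDimension;

    Column(String name, boolean isDimension) {
      this.name = name;
      this.isDimension = isDimension;
    }
  }

  // Mirrors the loop in resolve(): dimensions and measures go to separate
  // lists; the shared index records each column's position in the row.
  static void split(List<Column> columns, List<String> dims, List<String> msrs) {
    int index = 0;
    for (Column c : columns) {
      String tagged = c.name + "@row" + index++;
      if (c.isDimension) {
        dims.add(tagged);
      } else {
        msrs.add(tagged);
      }
    }
  }

  public static void main(String[] args) {
    List<Column> cols = new ArrayList<Column>();
    cols.add(new Column("imei", true));
    cols.add(new Column("sales", false));
    List<String> dims = new ArrayList<String>();
    List<String> msrs = new ArrayList<String>();
    split(cols, dims, msrs);
    System.out.println(dims + " " + msrs); // [imei@row0] [sales@row1]
  }
}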

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java b/core/src/main/java/org/carbondata/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
deleted file mode 100644
index f88040d..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.filter.resolver;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.SortedMap;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
-import org.carbondata.core.util.ByteUtil;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.expression.ColumnExpression;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.ExpressionResult;
-import org.carbondata.scan.expression.conditional.BinaryConditionalExpression;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.expression.logical.BinaryLogicalExpression;
-import org.carbondata.scan.filter.DimColumnFilterInfo;
-import org.carbondata.scan.filter.FilterUtil;
-import org.carbondata.scan.filter.intf.FilterExecuterType;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-import org.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
-
-public class RowLevelRangeFilterResolverImpl extends ConditionalFilterResolverImpl {
-
-  /**
-   *
-   */
-  private static final long serialVersionUID = 6629319265336666789L;
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(RowLevelRangeFilterResolverImpl.class.getName());
-  private List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList;
-  private List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList;
-  private AbsoluteTableIdentifier tableIdentifier;
-
-  public RowLevelRangeFilterResolverImpl(Expression exp, boolean isExpressionResolve,
-      boolean isIncludeFilter, AbsoluteTableIdentifier tableIdentifier) {
-    super(exp, isExpressionResolve, isIncludeFilter);
-    dimColEvaluatorInfoList =
-        new ArrayList<DimColumnResolvedFilterInfo>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    msrColEvalutorInfoList = new ArrayList<MeasureColumnResolvedFilterInfo>(
-        CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    this.tableIdentifier = tableIdentifier;
-  }
-
-  /**
-   * This method will return the filter values which are present in the range level
-   * conditional expressions.
-   *
-   * @return the filter range values as a byte[][]
-   */
-  public byte[][] getFilterRangeValues(SegmentProperties segmentProperties) {
-
-    if (null != dimColEvaluatorInfoList.get(0).getFilterValues() && !dimColEvaluatorInfoList.get(0)
-        .getDimension().hasEncoding(Encoding.DICTIONARY)) {
-      List<byte[]> noDictFilterValuesList =
-          dimColEvaluatorInfoList.get(0).getFilterValues().getNoDictionaryFilterValuesList();
-      return noDictFilterValuesList.toArray((new byte[noDictFilterValuesList.size()][]));
-    } else if (null != dimColEvaluatorInfoList.get(0).getFilterValues() && dimColEvaluatorInfoList
-        .get(0).getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
-      return FilterUtil.getKeyArray(this.dimColEvaluatorInfoList.get(0).getFilterValues(),
-          this.dimColEvaluatorInfoList.get(0).getDimension(),
-          segmentProperties.getDimensionKeyGenerator());
-    }
-    return null;
-
-  }
-
-  /**
-   * method will get the start key based on the filter surrogates
-   */
-  public void getStartKey(long[] startKey,
-      SortedMap<Integer, byte[]> noDictStartKeys, List<long[]> startKeyList) {
-    if (null == dimColEvaluatorInfoList.get(0).getStarIndexKey()) {
-      try {
-        FilterUtil.getStartKey(dimColEvaluatorInfoList.get(0).getDimensionResolvedFilterInstance(),
-            startKey, startKeyList);
-        FilterUtil
-            .getStartKeyForNoDictionaryDimension(dimColEvaluatorInfoList.get(0), noDictStartKeys);
-      } catch (QueryExecutionException e) {
-        LOGGER.error("Can not get the start key during block prune");
-      }
-    }
-  }
-
-  /**
-   * method will get the end key based on the filter surrogates
-   */
-  @Override public void getEndKey(SegmentProperties segmentProperties,
-      AbsoluteTableIdentifier absoluteTableIdentifier, long[] endKeys,
-      SortedMap<Integer, byte[]> noDicEndKeys, List<long[]> endKeyList) {
-    if (null == dimColEvaluatorInfoList.get(0).getEndIndexKey()) {
-      try {
-        FilterUtil.getEndKey(dimColEvaluatorInfoList.get(0).getDimensionResolvedFilterInstance(),
-            absoluteTableIdentifier, endKeys, segmentProperties, endKeyList);
-        FilterUtil
-            .getEndKeyForNoDictionaryDimension(dimColEvaluatorInfoList.get(0), noDicEndKeys);
-      } catch (QueryExecutionException e) {
-        LOGGER.error("Can not get the end key during block prune");
-      }
-    }
-  }
-
-  private List<byte[]> getNoDictionaryRangeValues() {
-    List<ExpressionResult> listOfExpressionResults = new ArrayList<ExpressionResult>(20);
-    if (this.getFilterExpression() instanceof BinaryConditionalExpression) {
-      listOfExpressionResults =
-          ((BinaryConditionalExpression) this.getFilterExpression()).getLiterals();
-    }
-    List<byte[]> filterValuesList = new ArrayList<byte[]>(20);
-    boolean invalidRowsPresent = false;
-    for (ExpressionResult result : listOfExpressionResults) {
-      try {
-        if (result.getString() == null) {
-          filterValuesList.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL.getBytes());
-          continue;
-        }
-        filterValuesList.add(result.getString().getBytes());
-      } catch (FilterIllegalMemberException e) {
-        // Any invalid member encountered during evaluation shall be ignored. The
-        // error is logged only once, since the evaluation happens for all rows and
-        // logging per row would produce too much log information.
-        FilterUtil.logError(e, invalidRowsPresent);
-      }
-    }
-    Comparator<byte[]> filterNoDictValueComaparator = new Comparator<byte[]>() {
-      @Override public int compare(byte[] filterMember1, byte[] filterMember2) {
-        return ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterMember1, filterMember2);
-      }
-
-    };
-    Collections.sort(filterValuesList, filterNoDictValueComaparator);
-    return filterValuesList;
-  }
-
-  /**
-   * Method which will resolve the filter expression by converting the filter
-   * member to its assigned dictionary values.
-   */
-  public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier)
-      throws FilterUnsupportedException {
-    DimColumnResolvedFilterInfo dimColumnEvaluatorInfo = null;
-    MeasureColumnResolvedFilterInfo msrColumnEvalutorInfo = null;
-    int index = 0;
-    if (exp instanceof BinaryLogicalExpression) {
-      BinaryLogicalExpression conditionalExpression = (BinaryLogicalExpression) exp;
-      List<ColumnExpression> columnList = conditionalExpression.getColumnList();
-      for (ColumnExpression columnExpression : columnList) {
-        if (columnExpression.isDimension()) {
-          dimColumnEvaluatorInfo = new DimColumnResolvedFilterInfo();
-          DimColumnFilterInfo filterInfo = new DimColumnFilterInfo();
-          dimColumnEvaluatorInfo.setColumnIndex(columnExpression.getCarbonColumn().getOrdinal());
-          dimColumnEvaluatorInfo.setRowIndex(index++);
-          dimColumnEvaluatorInfo.setDimension(columnExpression.getDimension());
-          dimColumnEvaluatorInfo.setDimensionExistsInCurrentSilce(false);
-          if (columnExpression.getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
-            filterInfo.setFilterList(getDirectSurrogateValues(columnExpression));
-          } else {
-            filterInfo.setFilterListForNoDictionaryCols(getNoDictionaryRangeValues());
-          }
-          filterInfo.setIncludeFilter(isIncludeFilter);
-          dimColumnEvaluatorInfo.setFilterValues(filterInfo);
-          dimColumnEvaluatorInfo
-              .addDimensionResolvedFilterInstance(columnExpression.getDimension(), filterInfo);
-          dimColEvaluatorInfoList.add(dimColumnEvaluatorInfo);
-        } else {
-          msrColumnEvalutorInfo = new MeasureColumnResolvedFilterInfo();
-          msrColumnEvalutorInfo.setRowIndex(index++);
-          msrColumnEvalutorInfo.setAggregator(
-              ((CarbonMeasure) columnExpression.getCarbonColumn()).getAggregateFunction());
-          msrColumnEvalutorInfo
-              .setColumnIndex(((CarbonMeasure) columnExpression.getCarbonColumn()).getOrdinal());
-          msrColumnEvalutorInfo.setType(columnExpression.getCarbonColumn().getDataType());
-          msrColEvalutorInfoList.add(msrColumnEvalutorInfo);
-        }
-      }
-    }
-  }
-
-  private List<Integer> getDirectSurrogateValues(ColumnExpression columnExpression)
-      throws FilterUnsupportedException {
-    List<ExpressionResult> listOfExpressionResults = new ArrayList<ExpressionResult>(20);
-    DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
-        .getDirectDictionaryGenerator(columnExpression.getDimension().getDataType());
-
-    if (this.getFilterExpression() instanceof BinaryConditionalExpression) {
-      listOfExpressionResults =
-          ((BinaryConditionalExpression) this.getFilterExpression()).getLiterals();
-    }
-    List<Integer> filterValuesList = new ArrayList<Integer>(20);
-    try {
-      // if any filter member provided by the user is invalid, throw an error;
-      // otherwise the system could display inconsistent results.
-      for (ExpressionResult result : listOfExpressionResults) {
-        filterValuesList.add(directDictionaryGenerator
-            .generateDirectSurrogateKey(result.getString(),
-                CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
-      }
-    } catch (FilterIllegalMemberException e) {
-      throw new FilterUnsupportedException(e);
-    }
-    return filterValuesList;
-  }
-
-  /**
-   * Method will return the DimColumnResolvedFilterInfo instance which consists of
-   * the mapping of the respective dimension and its surrogates involved in
-   * filter expression.
-   *
-   * @return DimColumnResolvedFilterInfo
-   */
-  public List<DimColumnResolvedFilterInfo> getDimColEvaluatorInfoList() {
-    return dimColEvaluatorInfoList;
-  }
-
-  /**
-   * Method will return the MeasureColumnResolvedFilterInfo instance which contains
-   * measure level details.
-   *
-   * @return MeasureColumnResolvedFilterInfo
-   */
-  public List<MeasureColumnResolvedFilterInfo> getMsrColEvalutorInfoList() {
-    return msrColEvalutorInfoList;
-  }
-
-  public AbsoluteTableIdentifier getTableIdentifier() {
-    return tableIdentifier;
-  }
-
-  public Expression getFilterExpression() {
-    return this.exp;
-  }
-
-  /**
-   * This method will provide the executer type to the callee in order to identify
-   * the executer type for the filter resolution. Row level filter executer is a
-   * special executer since it gets all the rows of the specified filter dimension,
-   * which will be sent to Spark for processing.
-   */
-  public FilterExecuterType getFilterExecuterType() {
-    switch (exp.getFilterExpressionType()) {
-      case GREATERTHAN:
-        return FilterExecuterType.ROWLEVEL_GREATERTHAN;
-      case GREATERTHAN_EQUALTO:
-        return FilterExecuterType.ROWLEVEL_GREATERTHAN_EQUALTO;
-      case LESSTHAN:
-        return FilterExecuterType.ROWLEVEL_LESSTHAN;
-      case LESSTHAN_EQUALTO:
-        return FilterExecuterType.ROWLEVEL_LESSTHAN_EQUALTO;
-
-      default:
-        return FilterExecuterType.ROWLEVEL;
-    }
-  }
-}
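
getNoDictionaryRangeValues above sorts the no-dictionary filter members with ByteUtil.UnsafeComparer, i.e. lexicographically over unsigned bytes. A JDK-only standalone equivalent of that comparator, offered as an illustration rather than the CarbonData implementation:

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

class UnsignedByteSortSketch {
  // Lexicographic comparison treating each byte as unsigned; sorted filter
  // values can then be compared consistently against stored key bytes.
  static final Comparator<byte[]> UNSIGNED = new Comparator<byte[]>() {
    @Override public int compare(byte[] a, byte[] b) {
      int n = Math.min(a.length, b.length);
      for (int i = 0; i < n; i++) {
        int cmp = (a[i] & 0xFF) - (b[i] & 0xFF);
        if (cmp != 0) {
          return cmp;
        }
      }
      return a.length - b.length;
    }
  };

  public static void main(String[] args) {
    List<byte[]> values = new ArrayList<byte[]>();
    values.add("beta".getBytes(StandardCharsets.UTF_8));
    values.add("alpha".getBytes(StandardCharsets.UTF_8));
    Collections.sort(values, UNSIGNED);
    System.out.println(new String(values.get(0), StandardCharsets.UTF_8)); // alpha
  }
}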

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/metadata/FilterResolverMetadata.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/metadata/FilterResolverMetadata.java b/core/src/main/java/org/carbondata/scan/filter/resolver/metadata/FilterResolverMetadata.java
deleted file mode 100644
index c684f5f..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/metadata/FilterResolverMetadata.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.resolver.metadata;
-
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.scan.expression.ColumnExpression;
-import org.carbondata.scan.expression.Expression;
-
-public class FilterResolverMetadata {
-  private AbsoluteTableIdentifier tableIdentifier;
-  private Expression expression;
-  private ColumnExpression columnExpression;
-  private boolean isIncludeFilter;
-
-  public AbsoluteTableIdentifier getTableIdentifier() {
-    return tableIdentifier;
-  }
-
-  public void setTableIdentifier(AbsoluteTableIdentifier tableIdentifier) {
-    this.tableIdentifier = tableIdentifier;
-  }
-
-  public Expression getExpression() {
-    return expression;
-  }
-
-  public void setExpression(Expression expression) {
-    this.expression = expression;
-  }
-
-  public ColumnExpression getColumnExpression() {
-    return columnExpression;
-  }
-
-  public void setColumnExpression(ColumnExpression columnExpression) {
-    this.columnExpression = columnExpression;
-  }
-
-  public boolean isIncludeFilter() {
-    return isIncludeFilter;
-  }
-
-  public void setIncludeFilter(boolean isIncludeFilter) {
-    this.isIncludeFilter = isIncludeFilter;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/DimColumnResolvedFilterInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/DimColumnResolvedFilterInfo.java b/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/DimColumnResolvedFilterInfo.java
deleted file mode 100644
index e5b70db..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/DimColumnResolvedFilterInfo.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.filter.resolver.resolverinfo;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.carbondata.core.carbon.datastore.IndexKey;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.DimColumnFilterInfo;
-import org.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
-import org.carbondata.scan.filter.resolver.resolverinfo.visitable.ResolvedFilterInfoVisitable;
-import org.carbondata.scan.filter.resolver.resolverinfo.visitor.ResolvedFilterInfoVisitorIntf;
-
-public class DimColumnResolvedFilterInfo implements Serializable, ResolvedFilterInfoVisitable {
-  /**
-   *
-   */
-  private static final long serialVersionUID = 3428115141211084114L;
-
-  /**
-   * column index in file
-   */
-  private int columnIndex = -1;
-
-  /**
-   * need compressed data from file
-   */
-  private boolean needCompressedData;
-
-  /**
-   * rowIndex
-   */
-  private int rowIndex = -1;
-
-  private boolean isDimensionExistsInCurrentSilce = true;
-
-  private int rsSurrogates;
-
-  private String defaultValue;
-
-  private CarbonDimension dimension;
-
-  /**
-   * start index key of the block based on the keygenerator
-   */
-  private transient IndexKey starIndexKey;
-
-  /**
-   * end index key which is formed considering the max surrogate values
-   * from dictionary cache
-   */
-  private transient IndexKey endIndexKey;
-
-  /**
-   * resolved filter object of a particular filter Expression.
-   */
-  private DimColumnFilterInfo resolvedFilterValueObj;
-
-  private Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionResolvedFilter;
-
-  public DimColumnResolvedFilterInfo() {
-    dimensionResolvedFilter = new HashMap<CarbonDimension, List<DimColumnFilterInfo>>(20);
-  }
-
-  public IndexKey getStarIndexKey() {
-    return starIndexKey;
-  }
-
-  public void setStarIndexKey(IndexKey starIndexKey) {
-    this.starIndexKey = starIndexKey;
-  }
-
-  public IndexKey getEndIndexKey() {
-    return endIndexKey;
-  }
-
-  public void setEndIndexKey(IndexKey endIndexKey) {
-    this.endIndexKey = endIndexKey;
-  }
-
-  public void addDimensionResolvedFilterInstance(CarbonDimension dimension,
-      DimColumnFilterInfo filterResolvedObj) {
-    List<DimColumnFilterInfo> currentVals = dimensionResolvedFilter.get(dimension);
-    if (null == currentVals) {
-      currentVals = new ArrayList<DimColumnFilterInfo>(20);
-      currentVals.add(filterResolvedObj);
-      dimensionResolvedFilter.put(dimension, currentVals);
-    } else {
-      currentVals.add(filterResolvedObj);
-    }
-  }
-
-  public Map<CarbonDimension, List<DimColumnFilterInfo>> getDimensionResolvedFilterInstance() {
-    return dimensionResolvedFilter;
-  }
-
-  public CarbonDimension getDimension() {
-    return dimension;
-  }
-
-  public void setDimension(CarbonDimension dimension) {
-    this.dimension = dimension;
-  }
-
-  public int getColumnIndex() {
-    return columnIndex;
-  }
-
-  public void setColumnIndex(int columnIndex) {
-    this.columnIndex = columnIndex;
-  }
-
-  public boolean isNeedCompressedData() {
-    return needCompressedData;
-  }
-
-  public void setNeedCompressedData(boolean needCompressedData) {
-    this.needCompressedData = needCompressedData;
-  }
-
-  public DimColumnFilterInfo getFilterValues() {
-    return resolvedFilterValueObj;
-  }
-
-  public void setFilterValues(final DimColumnFilterInfo resolvedFilterValueObj) {
-    this.resolvedFilterValueObj = resolvedFilterValueObj;
-  }
-
-  public int getRowIndex() {
-    return rowIndex;
-  }
-
-  public void setRowIndex(int rowIndex) {
-    this.rowIndex = rowIndex;
-  }
-
-  public boolean isDimensionExistsInCurrentSilce() {
-    return isDimensionExistsInCurrentSilce;
-  }
-
-  public void setDimensionExistsInCurrentSilce(boolean isDimensionExistsInCurrentSilce) {
-    this.isDimensionExistsInCurrentSilce = isDimensionExistsInCurrentSilce;
-  }
-
-  public int getRsSurrogates() {
-    return rsSurrogates;
-  }
-
-  public void setRsSurrogates(int rsSurrogates) {
-    this.rsSurrogates = rsSurrogates;
-  }
-
-  public String getDefaultValue() {
-    return defaultValue;
-  }
-
-  public void setDefaultValue(String defaultValue) {
-    this.defaultValue = defaultValue;
-  }
-
-  @Override public void populateFilterInfoBasedOnColumnType(ResolvedFilterInfoVisitorIntf visitor,
-      FilterResolverMetadata metadata) throws FilterUnsupportedException {
-    if (null != visitor) {
-      visitor.populateFilterResolvedInfo(this, metadata);
-      this.addDimensionResolvedFilterInstance(metadata.getColumnExpression().getDimension(),
-          this.getFilterValues());
-      this.setDimension(metadata.getColumnExpression().getDimension());
-      this.setColumnIndex(metadata.getColumnExpression().getDimension().getOrdinal());
-    }
-
-  }
-}
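
populateFilterInfoBasedOnColumnType above is the accept() half of a classic visitor: the visitable hands itself to whichever visitor the factory selected for the column type. A minimal standalone rendering of the pattern, with hypothetical names rather than the CarbonData visitor hierarchy:

import java.util.Arrays;
import java.util.List;

class VisitorSketch {
  interface Visitor {
    void populate(Info info);
  }

  // Visitable side: the resolved-info object lets the chosen visitor fill it.
  static class Info {
    List<String> filterValues;

    void accept(Visitor v) {
      v.populate(this); // double dispatch: behavior depends on visitor type
    }
  }

  // One visitor per column type in the real code (custom, direct dictionary,
  // no dictionary); this stand-in just fills raw member values.
  static class NoDictionaryVisitor implements Visitor {
    public void populate(Info info) {
      info.filterValues = Arrays.asList("member1", "member2");
    }
  }

  public static void main(String[] args) {
    Info info = new Info();
    info.accept(new NoDictionaryVisitor());
    System.out.println(info.filterValues); // [member1, member2]
  }
}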

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/MeasureColumnResolvedFilterInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/MeasureColumnResolvedFilterInfo.java b/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/MeasureColumnResolvedFilterInfo.java
deleted file mode 100644
index d4cac8c..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/MeasureColumnResolvedFilterInfo.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.filter.resolver.resolverinfo;
-
-import java.io.Serializable;
-
-public class MeasureColumnResolvedFilterInfo implements Serializable {
-  /**
-   *
-   */
-  private static final long serialVersionUID = 4222568289115151561L;
-
-  private int columnIndex = -1;
-
-  private int rowIndex = -1;
-
-  private Object uniqueValue;
-
-  private String aggregator;
-
-  private boolean isMeasureExistsInCurrentSlice = true;
-
-  private Object defaultValue;
-
-  private org.carbondata.core.carbon.metadata.datatype.DataType type;
-
-  public int getColumnIndex() {
-    return columnIndex;
-  }
-
-  public void setColumnIndex(int columnIndex) {
-    this.columnIndex = columnIndex;
-  }
-
-  public int getRowIndex() {
-    return rowIndex;
-  }
-
-  public void setRowIndex(int rowIndex) {
-    this.rowIndex = rowIndex;
-  }
-
-  public Object getUniqueValue() {
-    return uniqueValue;
-  }
-
-  public void setUniqueValue(Object uniqueValue) {
-    this.uniqueValue = uniqueValue;
-  }
-
-  public org.carbondata.core.carbon.metadata.datatype.DataType getType() {
-    return type;
-  }
-
-  public void setType(org.carbondata.core.carbon.metadata.datatype.DataType dataType) {
-    this.type = dataType;
-  }
-
-  /**
-   * @return Returns the aggregator.
-   */
-  public String getAggregator() {
-    return aggregator;
-  }
-
-  /**
-   * @param aggregator The aggregator to set.
-   */
-  public void setAggregator(String aggregator) {
-    this.aggregator = aggregator;
-  }
-
-  public boolean isMeasureExistsInCurrentSlice() {
-    return isMeasureExistsInCurrentSlice;
-  }
-
-  public void setMeasureExistsInCurrentSlice(boolean isMeasureExistsInCurrentSlice) {
-    this.isMeasureExistsInCurrentSlice = isMeasureExistsInCurrentSlice;
-  }
-
-  public Object getDefaultValue() {
-    return defaultValue;
-  }
-
-  public void setDefaultValue(double defaultValue) {
-    this.defaultValue = defaultValue;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitable/ResolvedFilterInfoVisitable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitable/ResolvedFilterInfoVisitable.java b/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitable/ResolvedFilterInfoVisitable.java
deleted file mode 100644
index 24762ae..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitable/ResolvedFilterInfoVisitable.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.resolver.resolverinfo.visitable;
-
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
-import org.carbondata.scan.filter.resolver.resolverinfo.visitor.ResolvedFilterInfoVisitorIntf;
-
-public interface ResolvedFilterInfoVisitable {
-  /**
-   * This method accepts the given visitor; depending on which visitor implementation
-   * is passed in, the visitable filter instance will be resolved accordingly.
-   *
-   * @param visitor
-   * @param metadata
-   * @throws QueryExecutionException
-   * @throws FilterUnsupportedException
-   */
-  void populateFilterInfoBasedOnColumnType(ResolvedFilterInfoVisitorIntf visitor,
-      FilterResolverMetadata metadata) throws FilterUnsupportedException;
-
-}
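
The interface above is the visitable half of a classic double dispatch: the resolver hands a visitor to the visitable, and the visitor decides how the filter info is populated. A minimal, self-contained sketch of the same shape, with hypothetical names (SimpleVisitor, SimpleVisitable, UppercaseVisitor) standing in for the CarbonData types:

interface SimpleVisitor {
  void populate(SimpleVisitable visitable);
}

class UppercaseVisitor implements SimpleVisitor {
  public void populate(SimpleVisitable visitable) {
    // this visitor's resolution strategy: uppercase the raw value
    visitable.setResolvedValue(visitable.getRawValue().toUpperCase());
  }
}

class SimpleVisitable {
  private final String rawValue;
  private String resolvedValue;

  SimpleVisitable(String rawValue) {
    this.rawValue = rawValue;
  }

  String getRawValue() {
    return rawValue;
  }

  void setResolvedValue(String resolvedValue) {
    this.resolvedValue = resolvedValue;
  }

  String getResolvedValue() {
    return resolvedValue;
  }

  // mirrors populateFilterInfoBasedOnColumnType: hand this object to
  // whichever visitor the caller selected
  void accept(SimpleVisitor visitor) {
    visitor.populate(this);
  }
}

public class VisitorSketch {
  public static void main(String[] args) {
    SimpleVisitable visitable = new SimpleVisitable("carbon");
    visitable.accept(new UppercaseVisitor()); // resolution depends on the visitor
    System.out.println(visitable.getResolvedValue()); // prints CARBON
  }
}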

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/CustomTypeDictionaryVisitor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/CustomTypeDictionaryVisitor.java b/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/CustomTypeDictionaryVisitor.java
deleted file mode 100644
index 5dd27ce..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/CustomTypeDictionaryVisitor.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.resolver.resolverinfo.visitor;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
-import org.carbondata.scan.expression.ColumnExpression;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.DimColumnFilterInfo;
-import org.carbondata.scan.filter.FilterUtil;
-import org.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-
-public class CustomTypeDictionaryVisitor implements ResolvedFilterInfoVisitorIntf {
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(CustomTypeDictionaryVisitor.class.getName());
-
-  /**
-   * This visitor method is used to resolve or populate the filter details
-   * using custom type dictionary values; the filter members will be resolved by a
-   * custom type function which generates surrogates for direct dictionary filter members
-   *
-   * @param visitableObj
-   * @param metadata
-   * @throws FilterUnsupportedException if an exception occurs while evaluating
-   * filter members.
-   */
-  public void populateFilterResolvedInfo(DimColumnResolvedFilterInfo visitableObj,
-      FilterResolverMetadata metadata) throws FilterUnsupportedException {
-    DimColumnFilterInfo resolvedFilterObject = null;
-
-    List<String> evaluateResultListFinal;
-    try {
-      evaluateResultListFinal = metadata.getExpression().evaluate(null).getListAsString();
-    } catch (FilterIllegalMemberException e) {
-      throw new FilterUnsupportedException(e);
-    }
-    boolean isNotTimestampType = FilterUtil.checkIfDataTypeNotTimeStamp(metadata.getExpression());
-    resolvedFilterObject = getDirectDictionaryValKeyMemberForFilter(metadata.getTableIdentifier(),
-        metadata.getColumnExpression(), evaluateResultListFinal, metadata.isIncludeFilter(),
-        isNotTimestampType);
-    if (!metadata.isIncludeFilter() && null != resolvedFilterObject && !resolvedFilterObject
-        .getFilterList().contains(CarbonCommonConstants.MEMBER_DEFAULT_VAL_SURROGATE_KEY)) {
-      // Add the default surrogate key of the null member so that, for Hive
-      // compatibility, it is not displayed in the query result.
-      resolvedFilterObject.getFilterList()
-          .add(CarbonCommonConstants.MEMBER_DEFAULT_VAL_SURROGATE_KEY);
-      Collections.sort(resolvedFilterObject.getFilterList());
-    }
-    visitableObj.setFilterValues(resolvedFilterObject);
-  }
-
-  private DimColumnFilterInfo getDirectDictionaryValKeyMemberForFilter(
-      AbsoluteTableIdentifier tableIdentifier, ColumnExpression columnExpression,
-      List<String> evaluateResultListFinal, boolean isIncludeFilter, boolean isNotTimestampType) {
-    List<Integer> surrogates = new ArrayList<Integer>(20);
-    DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
-        .getDirectDictionaryGenerator(columnExpression.getDimension().getDataType());
-    // Read the dictionary values directly
-    getSurrogateValuesForDictionary(evaluateResultListFinal, surrogates, isNotTimestampType,
-        directDictionaryGenerator);
-
-    Collections.sort(surrogates);
-    DimColumnFilterInfo columnFilterInfo = null;
-    if (surrogates.size() > 0) {
-      columnFilterInfo = new DimColumnFilterInfo();
-      columnFilterInfo.setIncludeFilter(isIncludeFilter);
-      columnFilterInfo.setFilterList(surrogates);
-    }
-    return columnFilterInfo;
-  }
-
-  private void getSurrogateValuesForDictionary(List<String> evaluateResultListFinal,
-      List<Integer> surrogates, boolean isNotTimestampType,
-      DirectDictionaryGenerator directDictionaryGenerator) {
-    String timeFormat = CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT;
-    if (isNotTimestampType) {
-      timeFormat = null;
-    }
-    for (String filterMember : evaluateResultListFinal) {
-      surrogates
-          .add(directDictionaryGenerator.generateDirectSurrogateKey(filterMember, timeFormat));
-    }
-  }
-}
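
The visitor above relies on a direct dictionary generator: the surrogate is computed from the member value itself (e.g. a timestamp) instead of being looked up in a stored dictionary, and the resulting surrogate list is kept sorted. A hedged, self-contained sketch of that idea; the seconds-since-cutoff scheme below is an illustrative assumption, not CarbonData's actual encoding:

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class DirectSurrogateSketch {
  private static final SimpleDateFormat FORMAT =
      new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");

  // derive a surrogate directly from the member value; -1 marks a bad member
  static int generateDirectSurrogate(String member) {
    try {
      long cutoff = FORMAT.parse("2000-01-01 00:00:00").getTime();
      return (int) ((FORMAT.parse(member).getTime() - cutoff) / 1000L);
    } catch (ParseException e) {
      return -1;
    }
  }

  public static void main(String[] args) {
    List<Integer> surrogates = new ArrayList<>();
    for (String member : Arrays.asList(
        "2000-01-01 00:00:10", "2000-01-01 00:00:05")) {
      surrogates.add(generateDirectSurrogate(member));
    }
    Collections.sort(surrogates); // filter lists are kept sorted, as above
    System.out.println(surrogates); // prints [5, 10]
  }
}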

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/DictionaryColumnVisitor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/DictionaryColumnVisitor.java b/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/DictionaryColumnVisitor.java
deleted file mode 100644
index 8bd45e3..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/DictionaryColumnVisitor.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.resolver.resolverinfo.visitor;
-
-import java.util.Collections;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.DimColumnFilterInfo;
-import org.carbondata.scan.filter.FilterUtil;
-import org.carbondata.scan.filter.resolver.metadata.FilterResolverMetadata;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-
-public class DictionaryColumnVisitor implements ResolvedFilterInfoVisitorIntf {
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(DictionaryColumnVisitor.class.getName());
-
-  /**
-   * This visitor method is used to populate the visitableObj with dictionary filter details,
-   * where the filter values will be resolved using the dictionary cache.
-   *
-   * @param visitableObj
-   * @param metadata
-   * @throws FilterUnsupportedException if an exception occurs while evaluating
-   * filter members.
-   * @throws QueryExecutionException
-   */
-  public void populateFilterResolvedInfo(DimColumnResolvedFilterInfo visitableObj,
-      FilterResolverMetadata metadata) throws FilterUnsupportedException {
-    DimColumnFilterInfo resolvedFilterObject = null;
-    List<String> evaluateResultListFinal;
-    try {
-      evaluateResultListFinal = metadata.getExpression().evaluate(null).getListAsString();
-    } catch (FilterIllegalMemberException e) {
-      throw new FilterUnsupportedException(e);
-    }
-    try {
-      resolvedFilterObject = FilterUtil
-          .getFilterValues(metadata.getTableIdentifier(), metadata.getColumnExpression(),
-              evaluateResultListFinal, metadata.isIncludeFilter());
-      if (!metadata.isIncludeFilter() && null != resolvedFilterObject) {
-        // Add the default surrogate key of the null member so that, for Hive
-        // compatibility, it is not displayed in the query result.
-        resolvedFilterObject.getFilterList()
-            .add(CarbonCommonConstants.MEMBER_DEFAULT_VAL_SURROGATE_KEY);
-        Collections.sort(resolvedFilterObject.getFilterList());
-      }
-    } catch (QueryExecutionException e) {
-      throw new FilterUnsupportedException(e);
-    }
-    visitableObj.setFilterValues(resolvedFilterObject);
-  }
-}
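
Both dictionary visitors share the exclude-filter adjustment seen above: the surrogate reserved for the null member is appended to the filter list so null members are excluded too, and the list is re-sorted. A small sketch of that step in isolation; the null-member surrogate value of 1 is an assumption made for illustration:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class ExcludeFilterSketch {
  static final Integer NULL_MEMBER_SURROGATE = 1; // assumed value

  static List<Integer> adjustForExclude(List<Integer> filterList) {
    if (!filterList.contains(NULL_MEMBER_SURROGATE)) {
      filterList.add(NULL_MEMBER_SURROGATE); // also exclude the null member
      Collections.sort(filterList);          // keep the list sorted
    }
    return filterList;
  }

  public static void main(String[] args) {
    List<Integer> filters = new ArrayList<>(Arrays.asList(7, 3));
    System.out.println(adjustForExclude(filters)); // prints [1, 3, 7]
  }
}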

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/FilterInfoTypeVisitorFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/FilterInfoTypeVisitorFactory.java b/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/FilterInfoTypeVisitorFactory.java
deleted file mode 100644
index 35d9d70..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/resolver/resolverinfo/visitor/FilterInfoTypeVisitorFactory.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.resolver.resolverinfo.visitor;
-
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.scan.expression.ColumnExpression;
-
-public class FilterInfoTypeVisitorFactory {
-
-  /**
-   * This factory method returns the visitor instance appropriate to the
-   * column expression metadata on which the filters have been applied.
-   *
-   * @param columnExpression
-   * @return
-   */
-  public static ResolvedFilterInfoVisitorIntf getResolvedFilterInfoVisitor(
-      ColumnExpression columnExpression) {
-    if (columnExpression.getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
-      return new CustomTypeDictionaryVisitor();
-    } else if (!columnExpression.getDimension().hasEncoding(Encoding.DICTIONARY)) {
-      return new NoDictionaryTypeVisitor();
-    } else if (columnExpression.getDimension().hasEncoding(Encoding.DICTIONARY)) {
-      return new DictionaryColumnVisitor();
-    }
-
-    return null;
-  }
-}
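
The factory's dispatch order matters: DIRECT_DICTIONARY is tested first, so a column carrying that encoding never falls through to the generic dictionary branch. A self-contained sketch of that ordering, with a stand-in enum and the visitor names returned as strings:

import java.util.EnumSet;
import java.util.Set;

public class VisitorFactorySketch {
  enum Encoding { DICTIONARY, DIRECT_DICTIONARY }

  static String pickVisitor(Set<Encoding> encodings) {
    if (encodings.contains(Encoding.DIRECT_DICTIONARY)) {
      return "CustomTypeDictionaryVisitor";   // e.g. timestamp columns
    } else if (!encodings.contains(Encoding.DICTIONARY)) {
      return "NoDictionaryTypeVisitor";       // raw, unencoded columns
    } else {
      return "DictionaryColumnVisitor";       // dictionary-encoded columns
    }
  }

  public static void main(String[] args) {
    System.out.println(pickVisitor(EnumSet.of(Encoding.DICTIONARY)));
    System.out.println(pickVisitor(EnumSet.noneOf(Encoding.class)));
    // a column with both encodings resolves via the direct dictionary path
    System.out.println(pickVisitor(
        EnumSet.of(Encoding.DICTIONARY, Encoding.DIRECT_DICTIONARY)));
  }
}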


[08/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/executor/infos/AggregatorInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/infos/AggregatorInfo.java b/core/src/main/java/org/carbondata/scan/executor/infos/AggregatorInfo.java
deleted file mode 100644
index 2c163e1..0000000
--- a/core/src/main/java/org/carbondata/scan/executor/infos/AggregatorInfo.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.executor.infos;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-
-/**
- * Info class which store all the details
- * which is required during aggregation
- */
-public class AggregatorInfo {
-
-  /**
-   * selected query measure ordinal
-   * which will be used to read the measures chunk data
-   * this will be storing the index of the measure in measures chunk
-   */
-  private int[] measureOrdinals;
-
-  /**
-   * This parameter will be used to
-   * check whether particular measure is present
-   * in the table block, if not then its default value will be used
-   */
-  private boolean[] measureExists;
-
-  /**
-   * this default value will be used when some measure is not present
-   * in the table block. In case of table restructuring, if the user adds a
-   * measure, that measure won't be present in older blocks, so its default value
-   * will be used for aggregation when querying those older table blocks
-   */
-  private Object[] defaultValues;
-
-  /**
-   * In carbon there are three type of aggregation
-   * (dimension aggregation, expression aggregation and measure aggregation)
-   * Below index will be used to set the start position of expression in measures
-   * aggregator array
-   */
-  private int expressionAggregatorStartIndex;
-
-  /**
-   * In carbon there are three type of aggregation
-   * (dimension aggregation, expression aggregation and measure aggregation)
-   * Below index will be used to set the start position of measures in measures
-   * aggregator array
-   */
-  private int measureAggregatorStartIndex;
-
-  /**
-   * Data type of each measure
-   */
-  private DataType[] measureDataTypes;
-
-  /**
-   * @return the measureOrdinal
-   */
-  public int[] getMeasureOrdinals() {
-    return measureOrdinals;
-  }
-
-  /**
-   * @param measureOrdinal the measureOrdinal to set
-   */
-  public void setMeasureOrdinals(int[] measureOrdinal) {
-    this.measureOrdinals = measureOrdinal;
-  }
-
-  /**
-   * @return the measureExists
-   */
-  public boolean[] getMeasureExists() {
-    return measureExists;
-  }
-
-  /**
-   * @param measureExists the measureExists to set
-   */
-  public void setMeasureExists(boolean[] measureExists) {
-    this.measureExists = measureExists;
-  }
-
-  /**
-   * @return the defaultValues
-   */
-  public Object[] getDefaultValues() {
-    return defaultValues;
-  }
-
-  /**
-   * @param defaultValues the defaultValues to set
-   */
-  public void setDefaultValues(Object[] defaultValues) {
-    this.defaultValues = defaultValues;
-  }
-
-  /**
-   * @return the expressionAggregatorStartIndex
-   */
-  public int getExpressionAggregatorStartIndex() {
-    return expressionAggregatorStartIndex;
-  }
-
-  /**
-   * @param expressionAggregatorStartIndex the expressionAggregatorStartIndex to set
-   */
-  public void setExpressionAggregatorStartIndex(int expressionAggregatorStartIndex) {
-    this.expressionAggregatorStartIndex = expressionAggregatorStartIndex;
-  }
-
-  /**
-   * @return the measureAggregatorStartIndex
-   */
-  public int getMeasureAggregatorStartIndex() {
-    return measureAggregatorStartIndex;
-  }
-
-  /**
-   * @param measureAggregatorStartIndex the measureAggregatorStartIndex to set
-   */
-  public void setMeasureAggregatorStartIndex(int measureAggregatorStartIndex) {
-    this.measureAggregatorStartIndex = measureAggregatorStartIndex;
-  }
-
-  public DataType[] getMeasureDataTypes() {
-    return measureDataTypes;
-  }
-
-  public void setMeasureDataTypes(DataType[] measureDataTypes) {
-    this.measureDataTypes = measureDataTypes;
-  }
-}
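
The measureExists/defaultValues pair above exists for restructured tables: blocks written before a measure was added have no chunk for it, so the default value stands in during aggregation. A hedged sketch of that substitution; the array shapes and the read loop are illustrative assumptions:

public class DefaultValueSketch {
  public static void main(String[] args) {
    boolean[] measureExists = {true, false};     // second measure added later
    Object[] defaultValues = {null, 0.0};        // default for the new measure
    double[] chunkValues = {42.0};               // only one physical chunk

    int chunkIndex = 0;
    for (int i = 0; i < measureExists.length; i++) {
      Object value = measureExists[i]
          ? chunkValues[chunkIndex++]            // read from the block
          : defaultValues[i];                    // substitute the default
      System.out.println("measure " + i + " -> " + value);
    }
    // prints: measure 0 -> 42.0
    //         measure 1 -> 0.0
  }
}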

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/executor/infos/BlockExecutionInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/infos/BlockExecutionInfo.java b/core/src/main/java/org/carbondata/scan/executor/infos/BlockExecutionInfo.java
deleted file mode 100644
index 883357f..0000000
--- a/core/src/main/java/org/carbondata/scan/executor/infos/BlockExecutionInfo.java
+++ /dev/null
@@ -1,681 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.executor.infos;
-
-import java.util.Map;
-
-import org.carbondata.core.cache.dictionary.Dictionary;
-import org.carbondata.core.carbon.datastore.DataRefNode;
-import org.carbondata.core.carbon.datastore.IndexKey;
-import org.carbondata.core.carbon.datastore.block.AbstractIndex;
-import org.carbondata.core.carbon.querystatistics.QueryStatisticsRecorder;
-import org.carbondata.core.datastorage.store.impl.FileFactory.FileType;
-import org.carbondata.core.keygenerator.KeyGenerator;
-import org.carbondata.scan.filter.GenericQueryType;
-import org.carbondata.scan.filter.executer.FilterExecuter;
-import org.carbondata.scan.model.QueryDimension;
-import org.carbondata.scan.model.QueryMeasure;
-
-/**
- * Below class will have all the properties which needed during query execution
- * for one block
- */
-public class BlockExecutionInfo {
-
-  /**
-   * block on which query will be executed
-   */
-  private AbstractIndex blockIndex;
-
-  /**
-   * each segment's key size can differ, and in that case we need to update
-   * the fixed key with the latest segment's key generator. This property
-   * tells whether that update is required; if the key size is the same,
-   * it is not required
-   */
-  private boolean isFixedKeyUpdateRequired;
-
-  /**
-   * in case of a detail + order by query, when the number of output records is too
-   * large to hold in memory we need to spill data to disk, so this flag will be
-   * used to decide whether we can write to disk or not
-   */
-  private boolean isFileBasedQuery;
-
-  /**
-   * id of the query. this will be used to create directory while writing the
-   * data file in case of detail+order by query
-   */
-  private String queryId;
-
-  /**
-   * this handles limit queries: in case of a detail query we push down
-   * the limit to the executor level, so based on the limit value we can
-   * process only that many records
-   */
-  private int limit;
-
-  /**
-   * below to store all the information required for aggregation during query
-   * execution
-   */
-  private AggregatorInfo aggregatorInfo;
-
-  /**
-   * this will be used to get the first tentative block from which query
-   * execution starts; this will be useful in case of a filter query to get the
-   * start block based on filter values
-   */
-  private IndexKey startKey;
-
-  /**
-   * this will be used to get the last tentative block till which scanning
-   * will be done; this will be useful in case of a filter query to get the last
-   * block based on filter values
-   */
-  private IndexKey endKey;
-
-  /**
-   * masked byte for block which will be used to unpack the fixed length key,
-   * this will be used for updating the older block key with new block key
-   * generator
-   */
-  private int[] maskedByteForBlock;
-
-  /**
-   * flag to check whether query is detail query or aggregation query
-   */
-  private boolean isDetailQuery;
-
-  /**
-   * total number of dimension in block
-   */
-  private int totalNumberDimensionBlock;
-
-  /**
-   * total number of measure in block
-   */
-  private int totalNumberOfMeasureBlock;
-
-  /**
-   * will be used to read the dimension block from file
-   */
-  private int[] allSelectedDimensionBlocksIndexes;
-
-  /**
-   * will be used to read the measure block from file
-   */
-  private int[] allSelectedMeasureBlocksIndexes;
-
-  /**
-   * this will be used to update the older block fixed length keys with the
-   * new block fixed length key
-   */
-  private KeyStructureInfo keyStructureInfo;
-
-  /**
-   * below info will be used to sort the data based on the order by dimensions
-   */
-  private SortInfo sortInfo;
-
-  /**
-   * first block from which query execution will start
-   */
-  private DataRefNode firstDataBlock;
-
-  /**
-   * number of block to be scanned in the query
-   */
-  private long numberOfBlockToScan;
-
-  /**
-   * key size of the fixed length dimension column
-   */
-  private int fixedLengthKeySize;
-
-  /**
-   * dictionary column block indexes based on query
-   */
-  private int[] dictionaryColumnBlockIndex;
-  /**
-   * no dictionary column block indexes in based on the query order
-   */
-  private int[] noDictionaryBlockIndexes;
-
-  /**
-   * key generator used for generating the table block fixed length key
-   */
-  private KeyGenerator blockKeyGenerator;
-
-  /**
-   * each column value size
-   */
-  private int[] eachColumnValueSize;
-
-  /**
-   * partition number
-   */
-  private String partitionId;
-
-  /**
-   * column group block index in file to key structure info mapping
-   */
-  private Map<Integer, KeyStructureInfo> columnGroupToKeyStructureInfo;
-
-  /**
-   * mapping from each dictionary dimension to its dictionary, which will be
-   * used to get the actual data from the dictionary for aggregation and sorting
-   */
-  private Map<String, Dictionary> columnIdToDcitionaryMapping;
-
-  /**
-   * filter tree to execute the filter
-   */
-  private FilterExecuter filterExecuterTree;
-
-  /**
-   * fileType
-   */
-  private FileType fileType;
-
-  /**
-   * whether it needs only raw byte records without aggregation.
-   */
-  private boolean isRawRecordDetailQuery;
-
-  /**
-   * complexParentIndexToQueryMap
-   */
-  private Map<Integer, GenericQueryType> complexParentIndexToQueryMap;
-
-  /**
-   * complex dimension parent block indexes
-   */
-  private int[] complexColumnParentBlockIndexes;
-
-  /**
-   * to record the statistics
-   */
-  private QueryStatisticsRecorder statisticsRecorder;
-
-  /**
-   * @return the tableBlock
-   */
-  public AbstractIndex getDataBlock() {
-    return blockIndex;
-  }
-
-  /**
-   * list of dimension selected for in query
-   */
-  private QueryDimension[] queryDimensions;
-
-  /**
-   * list of measure selected in query
-   */
-  private QueryMeasure[] queryMeasures;
-
-  /**
-   * @param blockIndex the tableBlock to set
-   */
-  public void setDataBlock(AbstractIndex blockIndex) {
-    this.blockIndex = blockIndex;
-  }
-
-  /**
-   * @return the isFixedKeyUpdateRequired
-   */
-  public boolean isFixedKeyUpdateRequired() {
-    return isFixedKeyUpdateRequired;
-  }
-
-  /**
-   * @param isFixedKeyUpdateRequired the isFixedKeyUpdateRequired to set
-   */
-  public void setFixedKeyUpdateRequired(boolean isFixedKeyUpdateRequired) {
-    this.isFixedKeyUpdateRequired = isFixedKeyUpdateRequired;
-  }
-
-  /**
-   * @return the isFileBasedQuery
-   */
-  public boolean isFileBasedQuery() {
-    return isFileBasedQuery;
-  }
-
-  /**
-   * @param isFileBasedQuery the isFileBasedQuery to set
-   */
-  public void setFileBasedQuery(boolean isFileBasedQuery) {
-    this.isFileBasedQuery = isFileBasedQuery;
-  }
-
-  /**
-   * @return the queryId
-   */
-  public String getQueryId() {
-    return queryId;
-  }
-
-  /**
-   * @param queryId the queryId to set
-   */
-  public void setQueryId(String queryId) {
-    this.queryId = queryId;
-  }
-
-  /**
-   * @return the limit
-   */
-  public int getLimit() {
-    return limit;
-  }
-
-  /**
-   * @param limit the limit to set
-   */
-  public void setLimit(int limit) {
-    this.limit = limit;
-  }
-
-  /**
-   * @return the aggregatorInfos
-   */
-  public AggregatorInfo getAggregatorInfo() {
-    return aggregatorInfo;
-  }
-
-  /**
-   * @param aggregatorInfo the aggregatorInfos to set
-   */
-  public void setAggregatorInfo(AggregatorInfo aggregatorInfo) {
-    this.aggregatorInfo = aggregatorInfo;
-  }
-
-  /**
-   * @return the startKey
-   */
-  public IndexKey getStartKey() {
-    return startKey;
-  }
-
-  /**
-   * @param startKey the startKey to set
-   */
-  public void setStartKey(IndexKey startKey) {
-    this.startKey = startKey;
-  }
-
-  /**
-   * @return the endKey
-   */
-  public IndexKey getEndKey() {
-    return endKey;
-  }
-
-  /**
-   * @param endKey the endKey to set
-   */
-  public void setEndKey(IndexKey endKey) {
-    this.endKey = endKey;
-  }
-
-  /**
-   * @return the maskedByteForBlock
-   */
-  public int[] getMaskedByteForBlock() {
-    return maskedByteForBlock;
-  }
-
-
-
-  /**
-   * @param maskedByteForBlock the maskedByteForBlock to set
-   */
-  public void setMaskedByteForBlock(int[] maskedByteForBlock) {
-    this.maskedByteForBlock = maskedByteForBlock;
-  }
-
-  /**
-   * @return the isDetailQuery
-   */
-  public boolean isDetailQuery() {
-    return isDetailQuery;
-  }
-
-  /**
-   * @param isDetailQuery the isDetailQuery to set
-   */
-  public void setDetailQuery(boolean isDetailQuery) {
-    this.isDetailQuery = isDetailQuery;
-  }
-
-  /**
-   * @return the totalNumberDimensionBlock
-   */
-  public int getTotalNumberDimensionBlock() {
-    return totalNumberDimensionBlock;
-  }
-
-  /**
-   * @param totalNumberDimensionBlock the totalNumberDimensionBlock to set
-   */
-  public void setTotalNumberDimensionBlock(int totalNumberDimensionBlock) {
-    this.totalNumberDimensionBlock = totalNumberDimensionBlock;
-  }
-
-  /**
-   * @return the totalNumberOfMeasureBlock
-   */
-  public int getTotalNumberOfMeasureBlock() {
-    return totalNumberOfMeasureBlock;
-  }
-
-  /**
-   * @param totalNumberOfMeasureBlock the totalNumberOfMeasureBlock to set
-   */
-  public void setTotalNumberOfMeasureBlock(int totalNumberOfMeasureBlock) {
-    this.totalNumberOfMeasureBlock = totalNumberOfMeasureBlock;
-  }
-
-  /**
-   * @return the allSelectedDimensionBlocksIndexes
-   */
-  public int[] getAllSelectedDimensionBlocksIndexes() {
-    return allSelectedDimensionBlocksIndexes;
-  }
-
-  /**
-   * @param allSelectedDimensionBlocksIndexes the allSelectedDimensionBlocksIndexes to set
-   */
-  public void setAllSelectedDimensionBlocksIndexes(int[] allSelectedDimensionBlocksIndexes) {
-    this.allSelectedDimensionBlocksIndexes = allSelectedDimensionBlocksIndexes;
-  }
-
-  /**
-   * @return the allSelectedMeasureBlocksIndexes
-   */
-  public int[] getAllSelectedMeasureBlocksIndexes() {
-    return allSelectedMeasureBlocksIndexes;
-  }
-
-  /**
-   * @param allSelectedMeasureBlocksIndexes the allSelectedMeasureBlocksIndexes to set
-   */
-  public void setAllSelectedMeasureBlocksIndexes(int[] allSelectedMeasureBlocksIndexes) {
-    this.allSelectedMeasureBlocksIndexes = allSelectedMeasureBlocksIndexes;
-  }
-
-  /**
-   * @return the restructureInfos
-   */
-  public KeyStructureInfo getKeyStructureInfo() {
-    return keyStructureInfo;
-  }
-
-  /**
-   * @param keyStructureInfo the restructureInfos to set
-   */
-  public void setKeyStructureInfo(KeyStructureInfo keyStructureInfo) {
-    this.keyStructureInfo = keyStructureInfo;
-  }
-
-  /**
-   * @return the sortInfos
-   */
-  public SortInfo getSortInfo() {
-    return sortInfo;
-  }
-
-  /**
-   * @param sortInfo the sortInfos to set
-   */
-  public void setSortInfo(SortInfo sortInfo) {
-    this.sortInfo = sortInfo;
-  }
-
-  /**
-   * @return the firstDataBlock
-   */
-  public DataRefNode getFirstDataBlock() {
-    return firstDataBlock;
-  }
-
-  /**
-   * @param firstDataBlock the firstDataBlock to set
-   */
-  public void setFirstDataBlock(DataRefNode firstDataBlock) {
-    this.firstDataBlock = firstDataBlock;
-  }
-
-  /**
-   * @return the numberOfBlockToScan
-   */
-  public long getNumberOfBlockToScan() {
-    return numberOfBlockToScan;
-  }
-
-  /**
-   * @param numberOfBlockToScan the numberOfBlockToScan to set
-   */
-  public void setNumberOfBlockToScan(long numberOfBlockToScan) {
-    this.numberOfBlockToScan = numberOfBlockToScan;
-  }
-
-  /**
-   * @return the fixedLengthKeySize
-   */
-  public int getFixedLengthKeySize() {
-    return fixedLengthKeySize;
-  }
-
-  /**
-   * @param fixedLengthKeySize the fixedLengthKeySize to set
-   */
-  public void setFixedLengthKeySize(int fixedLengthKeySize) {
-    this.fixedLengthKeySize = fixedLengthKeySize;
-  }
-
-  /**
-   * @return the filterEvaluatorTree
-   */
-  public FilterExecuter getFilterExecuterTree() {
-    return filterExecuterTree;
-  }
-
-  /**
-   * @param filterExecuterTree the filterEvaluatorTree to set
-   */
-  public void setFilterExecuterTree(FilterExecuter filterExecuterTree) {
-    this.filterExecuterTree = filterExecuterTree;
-  }
-
-  /**
-   * @return the tableBlockKeyGenerator
-   */
-  public KeyGenerator getBlockKeyGenerator() {
-    return blockKeyGenerator;
-  }
-
-  /**
-   * @param tableBlockKeyGenerator the tableBlockKeyGenerator to set
-   */
-  public void setBlockKeyGenerator(KeyGenerator tableBlockKeyGenerator) {
-    this.blockKeyGenerator = tableBlockKeyGenerator;
-  }
-
-  /**
-   * @return the eachColumnValueSize
-   */
-  public int[] getEachColumnValueSize() {
-    return eachColumnValueSize;
-  }
-
-  /**
-   * @param eachColumnValueSize the eachColumnValueSize to set
-   */
-  public void setEachColumnValueSize(int[] eachColumnValueSize) {
-    this.eachColumnValueSize = eachColumnValueSize;
-  }
-
-  /**
-   * @return the partitionId
-   */
-  public String getPartitionId() {
-    return partitionId;
-  }
-
-  /**
-   * @param partitionId the partitionId to set
-   */
-  public void setPartitionId(String partitionId) {
-    this.partitionId = partitionId;
-  }
-
-  /**
-   * @return the dictionaryColumnBlockIndex
-   */
-  public int[] getDictionaryColumnBlockIndex() {
-    return dictionaryColumnBlockIndex;
-  }
-
-  /**
-   * @param dictionaryColumnBlockIndex the dictionaryColumnBlockIndex to set
-   */
-  public void setDictionaryColumnBlockIndex(int[] dictionaryColumnBlockIndex) {
-    this.dictionaryColumnBlockIndex = dictionaryColumnBlockIndex;
-  }
-
-  /**
-   * @return the noDictionaryBlockIndexes
-   */
-  public int[] getNoDictionaryBlockIndexes() {
-    return noDictionaryBlockIndexes;
-  }
-
-  /**
-   * @param noDictionaryBlockIndexes the noDictionaryBlockIndexes to set
-   */
-  public void setNoDictionaryBlockIndexes(int[] noDictionaryBlockIndexes) {
-    this.noDictionaryBlockIndexes = noDictionaryBlockIndexes;
-  }
-
-  /**
-   * @return the columnGroupToKeyStructureInfo
-   */
-  public Map<Integer, KeyStructureInfo> getColumnGroupToKeyStructureInfo() {
-    return columnGroupToKeyStructureInfo;
-  }
-
-  /**
-   * @param columnGroupToKeyStructureInfo the columnGroupToKeyStructureInfo to set
-   */
-  public void setColumnGroupToKeyStructureInfo(
-      Map<Integer, KeyStructureInfo> columnGroupToKeyStructureInfo) {
-    this.columnGroupToKeyStructureInfo = columnGroupToKeyStructureInfo;
-  }
-
-  /**
-   * @return the columnIdToDcitionaryMapping
-   */
-  public Map<String, Dictionary> getColumnIdToDcitionaryMapping() {
-    return columnIdToDcitionaryMapping;
-  }
-
-  /**
-   * @param columnIdToDcitionaryMapping the columnIdToDcitionaryMapping to set
-   */
-  public void setColumnIdToDcitionaryMapping(Map<String, Dictionary> columnIdToDcitionaryMapping) {
-    this.columnIdToDcitionaryMapping = columnIdToDcitionaryMapping;
-  }
-
-  /**
-   * @return the fileType
-   */
-  public FileType getFileType() {
-    return fileType;
-  }
-
-  /**
-   * @param fileType the fileType to set
-   */
-  public void setFileType(FileType fileType) {
-    this.fileType = fileType;
-  }
-
-  public boolean isRawRecordDetailQuery() {
-    return isRawRecordDetailQuery;
-  }
-
-  public void setRawRecordDetailQuery(boolean rawRecordDetailQuery) {
-    isRawRecordDetailQuery = rawRecordDetailQuery;
-  }
-
-  /**
-   * @return the complexParentIndexToQueryMap
-   */
-  public Map<Integer, GenericQueryType> getComlexDimensionInfoMap() {
-    return complexParentIndexToQueryMap;
-  }
-
-  /**
-   * @param complexDimensionInfoMap the complexParentIndexToQueryMap to set
-   */
-  public void setComplexDimensionInfoMap(Map<Integer, GenericQueryType> complexDimensionInfoMap) {
-    this.complexParentIndexToQueryMap = complexDimensionInfoMap;
-  }
-
-  /**
-   * @return the complexColumnParentBlockIndexes
-   */
-  public int[] getComplexColumnParentBlockIndexes() {
-    return complexColumnParentBlockIndexes;
-  }
-
-  /**
-   * @param complexColumnParentBlockIndexes the complexColumnParentBlockIndexes to set
-   */
-  public void setComplexColumnParentBlockIndexes(int[] complexColumnParentBlockIndexes) {
-    this.complexColumnParentBlockIndexes = complexColumnParentBlockIndexes;
-  }
-
-  public QueryStatisticsRecorder getStatisticsRecorder() {
-    return statisticsRecorder;
-  }
-
-  public void setStatisticsRecorder(QueryStatisticsRecorder statisticsRecorder) {
-    this.statisticsRecorder = statisticsRecorder;
-  }
-
-  public QueryDimension[] getQueryDimensions() {
-    return queryDimensions;
-  }
-
-  public void setQueryDimensions(QueryDimension[] queryDimensions) {
-    this.queryDimensions = queryDimensions;
-  }
-
-  public QueryMeasure[] getQueryMeasures() {
-    return queryMeasures;
-  }
-
-  public void setQueryMeasures(QueryMeasure[] queryMeasures) {
-    this.queryMeasures = queryMeasures;
-  }
-}
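
Among the fields above, 'limit' implements the pushdown described in its comment: the executor stops producing rows for a block once the limit is reached. A trivial self-contained sketch of that early exit, with a stand-in row source:

import java.util.ArrayList;
import java.util.List;

public class LimitPushdownSketch {
  public static void main(String[] args) {
    int limit = 3; // as would be returned by BlockExecutionInfo.getLimit()
    List<String> produced = new ArrayList<>();
    for (int row = 0; row < 1000; row++) {
      produced.add("row-" + row);
      if (produced.size() >= limit) {
        break; // stop scanning the block early
      }
    }
    System.out.println(produced); // prints [row-0, row-1, row-2]
  }
}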

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/executor/infos/KeyStructureInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/infos/KeyStructureInfo.java b/core/src/main/java/org/carbondata/scan/executor/infos/KeyStructureInfo.java
deleted file mode 100644
index a595f33..0000000
--- a/core/src/main/java/org/carbondata/scan/executor/infos/KeyStructureInfo.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.executor.infos;
-
-import org.carbondata.core.keygenerator.KeyGenerator;
-
-/**
- * Below class will store the structure of the key
- * used during query execution
- */
-public class KeyStructureInfo {
-
-  /**
-   * it is actually the key generator of the latest
-   * table block, as this key generator will be used
-   * to update the mdkey of the older slice to the new slice
-   */
-  private KeyGenerator keyGenerator;
-
-  /**
-   * mask bytes ranges for the query
-   */
-  private int[] maskByteRanges;
-
-  /**
-   * masked bytes of the query
-   */
-  private int[] maskedBytes;
-
-  /**
-   * max key for query execution
-   */
-  private byte[] maxKey;
-
-  /**
-   * dimension ordinals inside the column group
-   */
-  private int[] mdkeyQueryDimensionOrdinal;
-
-  /**
-   * @return the keyGenerator
-   */
-  public KeyGenerator getKeyGenerator() {
-    return keyGenerator;
-  }
-
-  /**
-   * @param keyGenerator the keyGenerator to set
-   */
-  public void setKeyGenerator(KeyGenerator keyGenerator) {
-    this.keyGenerator = keyGenerator;
-  }
-
-  /**
-   * @return the maskByteRanges
-   */
-  public int[] getMaskByteRanges() {
-    return maskByteRanges;
-  }
-
-  /**
-   * @param maskByteRanges the maskByteRanges to set
-   */
-  public void setMaskByteRanges(int[] maskByteRanges) {
-    this.maskByteRanges = maskByteRanges;
-  }
-
-  /**
-   * @return the maskedBytes
-   */
-  public int[] getMaskedBytes() {
-    return maskedBytes;
-  }
-
-  /**
-   * @param maskedBytes the maskedBytes to set
-   */
-  public void setMaskedBytes(int[] maskedBytes) {
-    this.maskedBytes = maskedBytes;
-  }
-
-  /**
-   * @return the maxKey
-   */
-  public byte[] getMaxKey() {
-    return maxKey;
-  }
-
-  /**
-   * @param maxKey the maxKey to set
-   */
-  public void setMaxKey(byte[] maxKey) {
-    this.maxKey = maxKey;
-  }
-
-  public int[] getMdkeyQueryDimensionOrdinal() {
-    return mdkeyQueryDimensionOrdinal;
-  }
-
-  public void setMdkeyQueryDimensionOrdinal(int[] mdkeyQueryDimensionOrdinal) {
-    this.mdkeyQueryDimensionOrdinal = mdkeyQueryDimensionOrdinal;
-  }
-}
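
maskByteRanges, maskedBytes and maxKey together let the scanner keep only the mdkey bytes belonging to the queried dimensions. A hedged sketch of one plausible masking step; the key layout and byte positions below are made up for illustration:

import java.util.Arrays;

public class MaskedKeySketch {
  static byte[] maskKey(byte[] mdkey, int[] maskByteRanges, byte[] maxKey) {
    byte[] masked = new byte[maskByteRanges.length];
    for (int i = 0; i < maskByteRanges.length; i++) {
      // AND with maxKey zeroes out bits of non-selected dimensions that
      // share a byte with a selected one
      masked[i] = (byte) (mdkey[maskByteRanges[i]] & maxKey[maskByteRanges[i]]);
    }
    return masked;
  }

  public static void main(String[] args) {
    byte[] mdkey = {0x12, 0x34, 0x56, 0x78};
    int[] maskByteRanges = {1, 3};                    // bytes of queried dims
    byte[] maxKey = {0, (byte) 0xFF, 0, (byte) 0xFF}; // all-ones for queried dims
    System.out.println(Arrays.toString(maskKey(mdkey, maskByteRanges, maxKey)));
    // prints [52, 120]
  }
}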

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/executor/infos/SortInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/infos/SortInfo.java b/core/src/main/java/org/carbondata/scan/executor/infos/SortInfo.java
deleted file mode 100644
index 53584f5..0000000
--- a/core/src/main/java/org/carbondata/scan/executor/infos/SortInfo.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.executor.infos;
-
-import java.util.List;
-
-import org.carbondata.scan.model.QueryDimension;
-
-/**
- * Below class holds the order by information about the query
- */
-public class SortInfo {
-
-  /**
-   * sorting order of a dimension
-   */
-  private byte[] dimensionSortOrder;
-
-  /**
-   * byte range of each dimension present in the order by
-   */
-  private int[][] maskedByteRangeForSorting;
-
-  /**
-   * dimension indexes which are used in order by
-   */
-  private byte[] sortDimensionIndex;
-
-  /**
-   * mask key of each dimension
-   * this will be used to sort the dimension
-   */
-  private byte[][] dimensionMaskKeyForSorting;
-
-  /**
-   * sortDimension
-   */
-  private List<QueryDimension> sortDimension;
-
-  /**
-   * @return the dimensionSortOrder
-   */
-  public byte[] getDimensionSortOrder() {
-    return dimensionSortOrder;
-  }
-
-  /**
-   * @param dimensionSortOrder the dimensionSortOrder to set
-   */
-  public void setDimensionSortOrder(byte[] dimensionSortOrder) {
-    this.dimensionSortOrder = dimensionSortOrder;
-  }
-
-  /**
-   * @return the maskedByteRangeForSorting
-   */
-  public int[][] getMaskedByteRangeForSorting() {
-    return maskedByteRangeForSorting;
-  }
-
-  /**
-   * @param maskedByteRangeForSorting the maskedByteRangeForSorting to set
-   */
-  public void setMaskedByteRangeForSorting(int[][] maskedByteRangeForSorting) {
-    this.maskedByteRangeForSorting = maskedByteRangeForSorting;
-  }
-
-  /**
-   * @return the sortDimensionIndex
-   */
-  public byte[] getSortDimensionIndex() {
-    return sortDimensionIndex;
-  }
-
-  /**
-   * @param sortDimensionIndex the sortDimensionIndex to set
-   */
-  public void setSortDimensionIndex(byte[] sortDimensionIndex) {
-    this.sortDimensionIndex = sortDimensionIndex;
-  }
-
-  /**
-   * @return the dimensionMaskKeyForSorting
-   */
-  public byte[][] getDimensionMaskKeyForSorting() {
-    return dimensionMaskKeyForSorting;
-  }
-
-  /**
-   * @param dimensionMaskKeyForSorting the dimensionMaskKeyForSorting to set
-   */
-  public void setDimensionMaskKeyForSorting(byte[][] dimensionMaskKeyForSorting) {
-    this.dimensionMaskKeyForSorting = dimensionMaskKeyForSorting;
-  }
-
-  /**
-   * @return the sortDimension
-   */
-  public List<QueryDimension> getSortDimension() {
-    return sortDimension;
-  }
-
-  /**
-   * @param sortDimension the sortDimension to set
-   */
-  public void setSortDimension(List<QueryDimension> sortDimension) {
-    this.sortDimension = sortDimension;
-  }
-}
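
SortInfo drives order-by over masked key bytes: rows are compared on each sort dimension's masked bytes, with dimensionSortOrder flipping the direction. A small sketch under the assumption that 0 means ascending and 1 descending (the convention is not shown in this file):

import java.util.Arrays;
import java.util.Comparator;

public class SortInfoSketch {
  public static void main(String[] args) {
    byte sortOrder = 1; // descending, per the assumed convention
    byte[][] maskedKeys = {{0x02}, {0x01}, {0x03}};
    Comparator<byte[]> cmp = (a, b) -> Byte.compare(a[0], b[0]);
    if (sortOrder == 1) {
      cmp = cmp.reversed(); // flip direction for descending order
    }
    Arrays.sort(maskedKeys, cmp);
    for (byte[] k : maskedKeys) {
      System.out.println(k[0]); // prints 3, 2, 1
    }
  }
}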

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/executor/util/QueryUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/util/QueryUtil.java b/core/src/main/java/org/carbondata/scan/executor/util/QueryUtil.java
deleted file mode 100644
index 2b26ba0..0000000
--- a/core/src/main/java/org/carbondata/scan/executor/util/QueryUtil.java
+++ /dev/null
@@ -1,951 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.executor.util;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.carbondata.core.cache.Cache;
-import org.carbondata.core.cache.CacheProvider;
-import org.carbondata.core.cache.CacheType;
-import org.carbondata.core.cache.dictionary.Dictionary;
-import org.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.metadata.CarbonMetadata;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.carbon.metadata.schema.table.CarbonTable;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.keygenerator.KeyGenException;
-import org.carbondata.core.keygenerator.KeyGenerator;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.core.util.CarbonUtilException;
-import org.carbondata.scan.complextypes.ArrayQueryType;
-import org.carbondata.scan.complextypes.PrimitiveQueryType;
-import org.carbondata.scan.complextypes.StructQueryType;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.executor.infos.KeyStructureInfo;
-import org.carbondata.scan.expression.ColumnExpression;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.filter.GenericQueryType;
-import org.carbondata.scan.filter.resolver.FilterResolverIntf;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-import org.carbondata.scan.model.QueryDimension;
-import org.carbondata.scan.model.QueryMeasure;
-import org.carbondata.scan.model.QueryModel;
-
-import org.apache.commons.lang3.ArrayUtils;
-
-/**
- * Utility class for query execution
- */
-public class QueryUtil {
-
-  /**
-   * Below method will be used to get the masked byte range based on the query
-   * dimension. It will give the range in the mdkey. This will be used to get
-   * the actual key array from masked mdkey
-   *
-   * @param queryDimensions query dimension selected in query
-   * @param keyGenerator    key generator
-   * @return masked key
-   */
-  public static int[] getMaskedByteRange(List<QueryDimension> queryDimensions,
-      KeyGenerator keyGenerator) {
-    Set<Integer> byteRangeSet = new TreeSet<Integer>();
-    int[] byteRange = null;
-    for (int i = 0; i < queryDimensions.size(); i++) {
-
-      // no dictionary columns and complex type columns
-      // are not part of the mdkey,
-      // so we will not select those dimensions when calculating the
-      // range
-      if (queryDimensions.get(i).getDimension().getKeyOrdinal() == -1) {
-        continue;
-      }
-      // get the offset of the dimension in the mdkey
-      byteRange =
-          keyGenerator.getKeyByteOffsets(queryDimensions.get(i).getDimension().getKeyOrdinal());
-      for (int j = byteRange[0]; j <= byteRange[1]; j++) {
-        byteRangeSet.add(j);
-      }
-    }
-    int[] maskedByteRange = new int[byteRangeSet.size()];
-    int index = 0;
-    Iterator<Integer> iterator = byteRangeSet.iterator();
-    // add the masked byte range
-    while (iterator.hasNext()) {
-      maskedByteRange[index++] = iterator.next();
-    }
-    return maskedByteRange;
-  }
-
-  public static int[] getMaskedByteRangeBasedOrdinal(List<Integer> ordinals,
-      KeyGenerator keyGenerator) {
-    Set<Integer> byteRangeSet = new TreeSet<Integer>();
-    int[] byteRange = null;
-    for (int i = 0; i < ordinals.size(); i++) {
-
-      // get the offset of the dimension in the mdkey
-      byteRange = keyGenerator.getKeyByteOffsets(ordinals.get(i));
-      for (int j = byteRange[0]; j <= byteRange[1]; j++) {
-        byteRangeSet.add(j);
-      }
-    }
-    int[] maskedByteRange = new int[byteRangeSet.size()];
-    int index = 0;
-    Iterator<Integer> iterator = byteRangeSet.iterator();
-    // add the masked byte range
-    while (iterator.hasNext()) {
-      maskedByteRange[index++] = iterator.next();
-    }
-    return maskedByteRange;
-  }
-
-  /**
-   * Below method will return the max key based on the dimension ordinal
-   *
-   * @param keyOrdinalList
-   * @param generator
-   * @return
-   * @throws KeyGenException
-   */
-  public static byte[] getMaxKeyBasedOnOrinal(List<Integer> keyOrdinalList, KeyGenerator generator)
-      throws KeyGenException {
-    long[] max = new long[generator.getDimCount()];
-    Arrays.fill(max, 0L);
-
-    for (int i = 0; i < keyOrdinalList.size(); i++) {
-      // adding for dimension which is selected in query
-      max[keyOrdinalList.get(i)] = Long.MAX_VALUE;
-    }
-    return generator.generateKey(max);
-  }
-
-  /**
-   * To get the max key based on dimensions: all other dimensions will be
-   * set to 0 bits and the required query dimensions will be masked with
-   * Long.MAX_VALUE so that we can mask the key and then compare while aggregating.
-   * This can be useful during a filter query when only a few dimensions are
-   * selected out of a row group
-   *
-   * @param queryDimensions dimension selected in query
-   * @param generator       key generator
-   * @return max key for dimension
-   * @throws KeyGenException if any problem while generating the key
-   */
-  public static byte[] getMaxKeyBasedOnDimensions(List<QueryDimension> queryDimensions,
-      KeyGenerator generator) throws KeyGenException {
-    long[] max = new long[generator.getDimCount()];
-    Arrays.fill(max, 0L);
-
-    for (int i = 0; i < queryDimensions.size(); i++) {
-      // no dictionary columns and complex type columns
-      // are not part of the mdkey,
-      // so we will not select those dimensions when calculating the
-      // range
-      if (queryDimensions.get(i).getDimension().getKeyOrdinal() == -1) {
-        continue;
-      }
-      // adding for dimension which is selected in query
-      max[queryDimensions.get(i).getDimension().getKeyOrdinal()] = Long.MAX_VALUE;
-    }
-
-    return generator.generateKey(max);
-  }
-
-  /**
-   * Below method will be used to get the masked key for query
-   *
-   * @param keySize         size of the masked key
-   * @param maskedKeyRanges masked byte range
-   * @return masked bytes
-   */
-  public static int[] getMaskedByte(int keySize, int[] maskedKeyRanges) {
-    int[] maskedKey = new int[keySize];
-    // all the non selected dimension will be filled with -1
-    Arrays.fill(maskedKey, -1);
-    for (int i = 0; i < maskedKeyRanges.length; i++) {
-      maskedKey[maskedKeyRanges[i]] = i;
-    }
-    return maskedKey;
-  }
-
-  /**
-   * Below method will be used to get the dimension block index in file based
-   * on query dimension
-   *
-   * @param queryDimensions                query dimension
-   * @param dimensionOrdinalToBlockMapping mapping of dimension block in file to query dimension
-   * @return block index of file
-   */
-  public static int[] getDimensionsBlockIndexes(List<QueryDimension> queryDimensions,
-      Map<Integer, Integer> dimensionOrdinalToBlockMapping,
-      List<CarbonDimension> customAggregationDimension) {
-    // using set as in row group columns will point to same block
-    Set<Integer> dimensionBlockIndex = new HashSet<Integer>();
-    int blockIndex = 0;
-    for (int i = 0; i < queryDimensions.size(); i++) {
-      blockIndex =
-          dimensionOrdinalToBlockMapping.get(queryDimensions.get(i).getDimension().getOrdinal());
-      dimensionBlockIndex.add(blockIndex);
-      if (queryDimensions.get(i).getDimension().numberOfChild() > 0) {
-        addChildrenBlockIndex(dimensionBlockIndex, queryDimensions.get(i).getDimension());
-      }
-    }
-    for (int i = 0; i < customAggregationDimension.size(); i++) {
-      blockIndex =
-          dimensionOrdinalToBlockMapping.get(customAggregationDimension.get(i).getOrdinal());
-      // not adding the children dimension as dimension aggregation
-      // is not push down in case of complex dimension
-      dimensionBlockIndex.add(blockIndex);
-    }
-    return ArrayUtils
-        .toPrimitive(dimensionBlockIndex.toArray(new Integer[dimensionBlockIndex.size()]));
-  }
-
-  /**
-   * Below method will be used to add the children block index
-   * this will be basically for complex dimension which will have children
-   *
-   * @param blockIndexes block indexes
-   * @param dimension    parent dimension
-   */
-  private static void addChildrenBlockIndex(Set<Integer> blockIndexes, CarbonDimension dimension) {
-    for (int i = 0; i < dimension.numberOfChild(); i++) {
-      addChildrenBlockIndex(blockIndexes, dimension.getListOfChildDimensions().get(i));
-      blockIndexes.add(dimension.getListOfChildDimensions().get(i).getOrdinal());
-    }
-  }
-
-  /**
-   * Below method will be used to get the dictionary mapping for all the
-   * dictionary encoded dimension present in the query
-   *
-   * @param queryDimensions            query dimension present in the query this will be used to
-   *                                   convert the result from surrogate key to actual data
-   * @param absoluteTableIdentifier    absolute table identifier
-   * @return dimension unique id to its dictionary map
-   * @throws QueryExecutionException
-   */
-  public static Map<String, Dictionary> getDimensionDictionaryDetail(
-      List<QueryDimension> queryDimensions,
-      Set<CarbonDimension> filterComplexDimensions,
-      AbsoluteTableIdentifier absoluteTableIdentifier) throws QueryExecutionException {
-    // to store the unique column id list of dimensions; this is required because
-    // a dimension can appear both as a
-    // query dimension and as the target of an aggregation function,
-    // and in that case
-    // we need to get only one instance of its dictionary
-    // (direct dictionary columns are skipped for the dictionary lookup)
-    Set<String> dictionaryDimensionFromQuery = new HashSet<String>();
-    for (int i = 0; i < queryDimensions.size(); i++) {
-      List<Encoding> encodingList = queryDimensions.get(i).getDimension().getEncoder();
-      // TODO remove the data type check for the parent column of a complex type;
-      // there is no need to write the dictionary encoding for it
-      if (CarbonUtil.hasEncoding(encodingList, Encoding.DICTIONARY) && !CarbonUtil
-          .hasEncoding(encodingList, Encoding.DIRECT_DICTIONARY)) {
-
-        if (queryDimensions.get(i).getDimension().numberOfChild() == 0) {
-          dictionaryDimensionFromQuery.add(queryDimensions.get(i).getDimension().getColumnId());
-        }
-        if (queryDimensions.get(i).getDimension().numberOfChild() > 0) {
-          getChildDimensionDictionaryDetail(queryDimensions.get(i).getDimension(),
-              dictionaryDimensionFromQuery);
-        }
-      }
-    }
-    Iterator<CarbonDimension> iterator = filterComplexDimensions.iterator();
-    while (iterator.hasNext()) {
-      getChildDimensionDictionaryDetail(iterator.next(), dictionaryDimensionFromQuery);
-    }
-    // converting to a list because the exposed API expects a list
-    // (arguably it should accept any collection)
-    List<String> dictionaryColumnIdList =
-        new ArrayList<String>(dictionaryDimensionFromQuery.size());
-    dictionaryColumnIdList.addAll(dictionaryDimensionFromQuery);
-    return getDictionaryMap(dictionaryColumnIdList, absoluteTableIdentifier);
-  }
-
-  /**
-   * Below method will be used to fill the children dimension column id
-   *
-   * @param queryDimensions              query dimension
-   * @param dictionaryDimensionFromQuery dictionary dimension for query
-   */
-  private static void getChildDimensionDictionaryDetail(CarbonDimension queryDimensions,
-      Set<String> dictionaryDimensionFromQuery) {
-    for (int j = 0; j < queryDimensions.numberOfChild(); j++) {
-      List<Encoding> encodingList = queryDimensions.getListOfChildDimensions().get(j).getEncoder();
-      if (queryDimensions.getListOfChildDimensions().get(j).numberOfChild() > 0) {
-        getChildDimensionDictionaryDetail(queryDimensions.getListOfChildDimensions().get(j),
-            dictionaryDimensionFromQuery);
-      } else if (!CarbonUtil.hasEncoding(encodingList, Encoding.DIRECT_DICTIONARY)) {
-        dictionaryDimensionFromQuery
-            .add(queryDimensions.getListOfChildDimensions().get(j).getColumnId());
-      }
-    }
-  }
-
-  /**
-   * Below method will be used to get the column id to its dictionary mapping
-   *
-   * @param dictionaryColumnIdList  dictionary column list
-   * @param absoluteTableIdentifier absolute table identifier
-   * @return dictionary mapping
-   * @throws QueryExecutionException
-   */
-  private static Map<String, Dictionary> getDictionaryMap(List<String> dictionaryColumnIdList,
-      AbsoluteTableIdentifier absoluteTableIdentifier) throws QueryExecutionException {
-    // this for dictionary unique identifier
-    List<DictionaryColumnUniqueIdentifier> dictionaryColumnUniqueIdentifiers =
-        getDictionaryColumnUniqueIdentifierList(dictionaryColumnIdList,
-            absoluteTableIdentifier.getCarbonTableIdentifier());
-    CacheProvider cacheProvider = CacheProvider.getInstance();
-    Cache forwardDictionaryCache = cacheProvider
-        .createCache(CacheType.FORWARD_DICTIONARY, absoluteTableIdentifier.getStorePath());
-    List<Dictionary> columnDictionaryList = null;
-    try {
-      columnDictionaryList = forwardDictionaryCache.getAll(dictionaryColumnUniqueIdentifiers);
-    } catch (CarbonUtilException e) {
-      throw new QueryExecutionException(e);
-    }
-    Map<String, Dictionary> columnDictionaryMap = new HashMap<>(columnDictionaryList.size());
-    for (int i = 0; i < dictionaryColumnUniqueIdentifiers.size(); i++) {
-      // TODO: null check for column dictionary, if cache size is less it
-      // might return null here, in that case throw exception
-      columnDictionaryMap.put(dictionaryColumnIdList.get(i), columnDictionaryList.get(i));
-    }
-    return columnDictionaryMap;
-  }
-
-  /**
-   * Below method will be used to get the dictionary column unique identifier
-   *
-   * @param dictionaryColumnIdList dictionary column ids
-   * @param carbonTableIdentifier  carbon table identifier
-   * @return list of dictionary column unique identifiers
-   */
-  private static List<DictionaryColumnUniqueIdentifier> getDictionaryColumnUniqueIdentifierList(
-      List<String> dictionaryColumnIdList, CarbonTableIdentifier carbonTableIdentifier)
-      throws QueryExecutionException {
-    CarbonTable carbonTable =
-        CarbonMetadata.getInstance().getCarbonTable(carbonTableIdentifier.getTableUniqueName());
-    List<DictionaryColumnUniqueIdentifier> dictionaryColumnUniqueIdentifiers =
-        new ArrayList<>(dictionaryColumnIdList.size());
-    for (String columnId : dictionaryColumnIdList) {
-      CarbonDimension dimension = CarbonMetadata.getInstance()
-          .getCarbonDimensionBasedOnColIdentifier(carbonTable, columnId);
-      if (null == dimension) {
-        throw new QueryExecutionException("The column id " + columnId + " could not be resolved.");
-      }
-      DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier =
-          new DictionaryColumnUniqueIdentifier(carbonTableIdentifier,
-              dimension.getColumnIdentifier(), dimension.getDataType());
-      dictionaryColumnUniqueIdentifiers.add(dictionaryColumnUniqueIdentifier);
-    }
-    return dictionaryColumnUniqueIdentifiers;
-  }
-
-  /**
-   * Below method will used to get the method will be used to get the measure
-   * block indexes to be read from the file
-   *
-   * @param queryMeasures              query measure
-   * @param expressionMeasure          measure present in the expression
-   * @param ordinalToBlockIndexMapping measure ordinal to block mapping
-   * @return block indexes
-   */
-  public static int[] getMeasureBlockIndexes(List<QueryMeasure> queryMeasures,
-      List<CarbonMeasure> expressionMeasure, Map<Integer, Integer> ordinalToBlockIndexMapping) {
-    Set<Integer> measureBlockIndex = new HashSet<Integer>();
-    for (int i = 0; i < queryMeasures.size(); i++) {
-      measureBlockIndex
-          .add(ordinalToBlockIndexMapping.get(queryMeasures.get(i).getMeasure().getOrdinal()));
-    }
-    for (int i = 0; i < expressionMeasure.size(); i++) {
-      measureBlockIndex.add(ordinalToBlockIndexMapping.get(expressionMeasure.get(i).getOrdinal()));
-    }
-    return ArrayUtils.toPrimitive(measureBlockIndex.toArray(new Integer[measureBlockIndex.size()]));
-  }
-
-  /**
-   * Below method will be used to get the masked byte range for dimension
-   * which is present in order by
-   *
-   * @param orderByDimensions order by dimension
-   * @param generator         key generator
-   * @param maskedRanges      masked byte range for dimension
-   * @return range of masked byte for order by dimension
-   */
-  public static int[][] getMaskedByteRangeForSorting(List<QueryDimension> orderByDimensions,
-      KeyGenerator generator, int[] maskedRanges) {
-    int[][] dimensionCompareIndex = new int[orderByDimensions.size()][];
-    int index = 0;
-    for (int i = 0; i < dimensionCompareIndex.length; i++) {
-      Set<Integer> integers = new TreeSet<Integer>();
-      if (!orderByDimensions.get(i).getDimension().getEncoder().contains(Encoding.DICTIONARY)
-          || orderByDimensions.get(i).getDimension().numberOfChild() > 0) {
-        continue;
-      }
-      int[] range =
-          generator.getKeyByteOffsets(orderByDimensions.get(i).getDimension().getKeyOrdinal());
-      for (int j = range[0]; j <= range[1]; j++) {
-        integers.add(j);
-      }
-      dimensionCompareIndex[index] = new int[integers.size()];
-      int j = 0;
-      for (Iterator<Integer> iterator = integers.iterator(); iterator.hasNext(); ) {
-        Integer integer = (Integer) iterator.next();
-        dimensionCompareIndex[index][j++] = integer.intValue();
-      }
-      index++;
-    }
-    for (int i = 0; i < dimensionCompareIndex.length; i++) {
-      if (null == dimensionCompareIndex[i]) {
-        continue;
-      }
-      int[] range = dimensionCompareIndex[i];
-      if (null != range) {
-        for (int j = 0; j < range.length; j++) {
-          for (int k = 0; k < maskedRanges.length; k++) {
-            if (range[j] == maskedRanges[k]) {
-              range[j] = k;
-              break;
-            }
-          }
-        }
-      }
-
-    }
-    return dimensionCompareIndex;
-  }
-
-  /**
-   * Below method will be used to get the masked key for sorting
-   *
-   * @param orderDimensions           query dimension
-   * @param generator                 key generator
-   * @param maskedByteRangeForSorting masked byte range for sorting
-   * @param maskedRanges              masked range
-   * @return masked keys for the order by dimensions
-   * @throws QueryExecutionException
-   */
-  public static byte[][] getMaksedKeyForSorting(List<QueryDimension> orderDimensions,
-      KeyGenerator generator, int[][] maskedByteRangeForSorting, int[] maskedRanges)
-      throws QueryExecutionException {
-    byte[][] maskedKey = new byte[orderDimensions.size()][];
-    byte[] mdKey = null;
-    long[] key = null;
-    byte[] maskedMdKey = null;
-    try {
-      if (null != maskedByteRangeForSorting) {
-        for (int i = 0; i < maskedByteRangeForSorting.length; i++) {
-          if (null == maskedByteRangeForSorting[i]) {
-            continue;
-          }
-          key = new long[generator.getDimCount()];
-          maskedKey[i] = new byte[maskedByteRangeForSorting[i].length];
-          key[orderDimensions.get(i).getDimension().getKeyOrdinal()] = Long.MAX_VALUE;
-          mdKey = generator.generateKey(key);
-          maskedMdKey = new byte[maskedRanges.length];
-          for (int k = 0; k < maskedMdKey.length; k++) { // CHECKSTYLE:OFF
-            // Approval
-            // No:Approval-V1R2C10_001
-            maskedMdKey[k] = mdKey[maskedRanges[k]];
-          }
-          for (int j = 0; j < maskedByteRangeForSorting[i].length; j++) {
-            maskedKey[i][j] = maskedMdKey[maskedByteRangeForSorting[i][j]];
-          }// CHECKSTYLE:ON
-
-        }
-      }
-    } catch (KeyGenException e) {
-      throw new QueryExecutionException(e);
-    }
-    return maskedKey;
-  }
-
-  /**
-   * Below method will be used to get a mapping indicating whether each query
-   * dimension is present in the order by clause
-   *
-   * @param sortedDimensions sort dimension present in order by query
-   * @param queryDimensions  query dimension
-   * @return sort dimension indexes
-   */
-  public static byte[] getSortDimensionIndexes(List<QueryDimension> sortedDimensions,
-      List<QueryDimension> queryDimensions) {
-    byte[] sortedDims = new byte[queryDimensions.size()];
-    int indexOf = 0;
-    for (int i = 0; i < sortedDims.length; i++) {
-      indexOf = sortedDimensions.indexOf(queryDimensions.get(i));
-      if (indexOf > -1) {
-        sortedDims[i] = 1;
-      }
-    }
-    return sortedDims;
-  }
-
-  /**
-   * Below method will be used to get the mapping of block index and its
-   * restructuring info
-   *
-   * @param queryDimensions   query dimension from query model
-   * @param segmentProperties segment properties
-   * @return map of block index to its restructuring info
-   * @throws KeyGenException if problem while key generation
-   */
-  public static Map<Integer, KeyStructureInfo> getColumnGroupKeyStructureInfo(
-      List<QueryDimension> queryDimensions, SegmentProperties segmentProperties)
-      throws KeyGenException {
-    Map<Integer, KeyStructureInfo> rowGroupToItsRSInfo = new HashMap<Integer, KeyStructureInfo>();
-    // get column group id and its ordinal mapping of column group
-    Map<Integer, List<Integer>> columnGroupAndItsOrdinalMappingForQuery =
-        getColumnGroupAndItsOrdinalMapping(queryDimensions);
-    Map<Integer, KeyGenerator> columnGroupAndItsKeygenartor =
-        segmentProperties.getColumnGroupAndItsKeygenartor();
-
-    Iterator<Entry<Integer, List<Integer>>> iterator =
-        columnGroupAndItsOrdinalMappingForQuery.entrySet().iterator();
-    KeyStructureInfo restructureInfos = null;
-    while (iterator.hasNext()) {
-      Entry<Integer, List<Integer>> next = iterator.next();
-      KeyGenerator keyGenerator = columnGroupAndItsKeygenartor.get(next.getKey());
-      restructureInfos = new KeyStructureInfo();
-      // sort the ordinal
-      List<Integer> ordinal = next.getValue();
-      List<Integer> mdKeyOrdinal = new ArrayList<Integer>();
-      // unsorted copy, preserving query order
-      List<Integer> mdKeyOrdinalForQuery = new ArrayList<Integer>();
-      for (Integer ord : ordinal) {
-        mdKeyOrdinal.add(segmentProperties.getColumnGroupMdKeyOrdinal(next.getKey(), ord));
-        mdKeyOrdinalForQuery.add(segmentProperties.getColumnGroupMdKeyOrdinal(next.getKey(), ord));
-      }
-      Collections.sort(mdKeyOrdinal);
-      // get the masked byte range for column group
-      int[] maskByteRanges = getMaskedByteRangeBasedOrdinal(mdKeyOrdinal, keyGenerator);
-      // max key for column group
-      byte[] maxKey = getMaxKeyBasedOnOrinal(mdKeyOrdinal, keyGenerator);
-      // get masked key for column group
-      int[] maksedByte = getMaskedByte(keyGenerator.getKeySizeInBytes(), maskByteRanges);
-      restructureInfos.setKeyGenerator(keyGenerator);
-      restructureInfos.setMaskByteRanges(maskByteRanges);
-      restructureInfos.setMaxKey(maxKey);
-      restructureInfos.setMaskedBytes(maksedByte);
-      restructureInfos.setMdkeyQueryDimensionOrdinal(ArrayUtils
-          .toPrimitive(mdKeyOrdinalForQuery.toArray(new Integer[mdKeyOrdinalForQuery.size()])));
-      rowGroupToItsRSInfo
-          .put(segmentProperties.getDimensionOrdinalToBlockMapping().get(ordinal.get(0)),
-              restructureInfos);
-    }
-    return rowGroupToItsRSInfo;
-  }
-
-  /**
-   * return true if given key is found in array
-   *
-   * @param data array to search
-   * @param key  key to look for
-   * @return true if the given key is found in the array
-   */
-  public static boolean searchInArray(int[] data, int key) {
-    for (int i = 0; i < data.length; i++) {
-      if (key == data[i]) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Below method will be used to create a mapping of column group columns.
-   * The mapping is from column group id to all the dimension ordinals present
-   * in that column group. It is used during query execution to create a masked
-   * key for the column group dimensions, which is needed in aggregation and
-   * filter queries because column group dimensions are stored at bit level
-   */
-  private static Map<Integer, List<Integer>> getColumnGroupAndItsOrdinalMapping(
-      List<QueryDimension> origdimensions) {
-
-    List<QueryDimension> dimensions = new ArrayList<QueryDimension>(origdimensions.size());
-    dimensions.addAll(origdimensions);
-    /**
-     * sort based on column group id
-     */
-    Collections.sort(dimensions, new Comparator<QueryDimension>() {
-
-      @Override public int compare(QueryDimension o1, QueryDimension o2) {
-        return Integer
-            .compare(o1.getDimension().columnGroupId(), o2.getDimension().columnGroupId());
-      }
-    });
-    // mapping of column group id to its ordinals; stores all column group columns
-    Map<Integer, List<Integer>> columnGroupAndItsOrdinalsMapping =
-        new HashMap<Integer, List<Integer>>();
-    // to store a column group
-    List<Integer> currentColumnGroup = null;
-    // current index
-    int index = 0;
-    // previous column group id, used to check whether all columns
-    // of a group have been selected
-    int prvColumnGroupId = -1;
-    while (index < dimensions.size()) {
-      // if the dimension is not columnar and its column group id matches the
-      // previous one, add its ordinal to the current group, since it belongs
-      // to the same column group
-      if (!dimensions.get(index).getDimension().isColumnar()
-          && dimensions.get(index).getDimension().columnGroupId() == prvColumnGroupId
-          && null != currentColumnGroup) {
-        currentColumnGroup.add(dimensions.get(index).getDimension().getOrdinal());
-      }
-
-      // if the dimension is not columnar, it starts a new column group
-      else if (!dimensions.get(index).getDimension().isColumnar()) {
-        currentColumnGroup = new ArrayList<Integer>();
-        columnGroupAndItsOrdinalsMapping
-            .put(dimensions.get(index).getDimension().columnGroupId(), currentColumnGroup);
-        currentColumnGroup.add(dimensions.get(index).getDimension().getOrdinal());
-      }
-      // update the previous group id every time; this is required to group
-      // the columns of the same column group together
-      prvColumnGroupId = dimensions.get(index).getDimension().columnGroupId();
-      index++;
-    }
-    return columnGroupAndItsOrdinalsMapping;
-  }
-
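
A simplified standalone sketch of the grouping pass above, modelling each dimension as an (ordinal, columnGroupId, isColumnar) triple instead of a QueryDimension (Java 16+ for the record; all values are made up):

    import java.util.*;

    public class ColumnGroupingDemo {
      record Dim(int ordinal, int groupId, boolean columnar) {}

      public static void main(String[] args) {
        List<Dim> dims = new ArrayList<>(List.of(
            new Dim(3, 1, false), new Dim(0, 0, true),
            new Dim(1, 1, false), new Dim(5, 2, false)));
        dims.sort(Comparator.comparingInt(Dim::groupId)); // stable sort by group id
        Map<Integer, List<Integer>> groups = new HashMap<>();
        int prevGroupId = -1;
        List<Integer> current = null;
        for (Dim d : dims) {
          if (!d.columnar() && d.groupId() == prevGroupId && current != null) {
            current.add(d.ordinal());        // same group as the previous entry
          } else if (!d.columnar()) {
            current = new ArrayList<>();     // first entry of a new column group
            groups.put(d.groupId(), current);
            current.add(d.ordinal());
          }
          prevGroupId = d.groupId();
        }
        System.out.println(groups); // {1=[3, 1], 2=[5]}
      }
    }
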
-  /**
-   * Below method will be used to get masked byte
-   *
-   * @param data           actual data
-   * @param maxKey         max key
-   * @param maskByteRanges mask byte range
-   * @param byteCount
-   * @return masked byte
-   */
-  public static byte[] getMaskedKey(byte[] data, byte[] maxKey, int[] maskByteRanges,
-      int byteCount) {
-    byte[] maskedKey = new byte[byteCount];
-    int counter = 0;
-    int byteRange = 0;
-    for (int i = 0; i < byteCount; i++) {
-      byteRange = maskByteRanges[i];
-      if (byteRange != -1) {
-        maskedKey[counter++] = (byte) (data[byteRange] & maxKey[byteRange]);
-      }
-    }
-    return maskedKey;
-  }
-
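
A standalone trace of the byte masking above with hypothetical inputs; positions listed in maskByteRanges are AND-ed with the max key, and -1 entries are skipped:

    import java.util.Arrays;

    public class MaskedKeyDemo {
      public static void main(String[] args) {
        byte[] data = {0x12, 0x34, 0x56};
        byte[] maxKey = {(byte) 0xFF, 0x0F, (byte) 0xF0};
        int[] maskByteRanges = {2, 0, -1};
        int byteCount = 3;
        byte[] maskedKey = new byte[byteCount];
        int counter = 0;
        for (int i = 0; i < byteCount; i++) {
          int byteRange = maskByteRanges[i];
          if (byteRange != -1) {
            maskedKey[counter++] = (byte) (data[byteRange] & maxKey[byteRange]);
          }
        }
        // 0x56 & 0xF0 = 0x50 (80), 0x12 & 0xFF = 0x12 (18), last slot unused
        System.out.println(Arrays.toString(maskedKey)); // [80, 18, 0]
      }
    }
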
-  /**
-   * Below method will be used to fill the block indexes of the query dimensions,
-   * which are used when creating an output row. Two lists are passed to store the
-   * indexes, one for dictionary columns and one for no-dictionary columns, so
-   * that both kinds of dimension block indexes can be filled in one iteration
-   *
-   * @param queryDimensions                  dimension present in the query
-   * @param columnOrdinalToBlockIndexMapping column ordinal to block index mapping
-   * @param dictionaryDimensionBlockIndex    list to store dictionary column block indexes
-   * @param noDictionaryDimensionBlockIndex  list to store no dictionary block indexes
-   */
-  public static void fillQueryDimensionsBlockIndexes(List<QueryDimension> queryDimensions,
-      Map<Integer, Integer> columnOrdinalToBlockIndexMapping,
-      Set<Integer> dictionaryDimensionBlockIndex, List<Integer> noDictionaryDimensionBlockIndex) {
-    for (QueryDimension queryDimension : queryDimensions) {
-      if (CarbonUtil.hasEncoding(queryDimension.getDimension().getEncoder(), Encoding.DICTIONARY)
-          && queryDimension.getDimension().numberOfChild() == 0) {
-        dictionaryDimensionBlockIndex
-            .add(columnOrdinalToBlockIndexMapping.get(queryDimension.getDimension().getOrdinal()));
-      } else if (queryDimension.getDimension().numberOfChild() == 0) {
-        noDictionaryDimensionBlockIndex
-            .add(columnOrdinalToBlockIndexMapping.get(queryDimension.getDimension().getOrdinal()));
-      }
-    }
-  }
-
-  /**
-   * Below method will be used to resolve the query model.
-   * Resolving sets the actual dimension and measure objects, since only
-   * column names are passed from the driver, to avoid heavy object
-   * serialization
-   *
-   * @param queryModel query model
-   */
-  public static void resolveQueryModel(QueryModel queryModel) {
-    CarbonMetadata.getInstance().addCarbonTable(queryModel.getTable());
-    // TODO need to load the table from table identifier
-    CarbonTable carbonTable = queryModel.getTable();
-    String tableName =
-        queryModel.getAbsoluteTableIdentifier().getCarbonTableIdentifier().getTableName();
-    // resolve query dimension
-    for (QueryDimension queryDimension : queryModel.getQueryDimension()) {
-      queryDimension
-          .setDimension(carbonTable.getDimensionByName(tableName, queryDimension.getColumnName()));
-    }
-    // resolve sort dimension
-    for (QueryDimension sortDimension : queryModel.getSortDimension()) {
-      sortDimension
-          .setDimension(carbonTable.getDimensionByName(tableName, sortDimension.getColumnName()));
-    }
-    // resolve query measure
-    for (QueryMeasure queryMeasure : queryModel.getQueryMeasures()) {
-      // in case of count(*) the column name will be "count(*)", so first check
-      // whether any measure is present; if the first measure is not the default
-      // (invisible dummy) measure, use it, otherwise wrap the first dimension as
-      // a measure. Currently a default measure is always added when none is
-      // present, so the first condition never fails, but the check is kept in
-      // case that behaviour changes in the future
-      if (queryMeasure.getColumnName().equals("count(*)")) {
-        if (carbonTable.getMeasureByTableName(tableName).size() > 0 && !carbonTable
-            .getMeasureByTableName(tableName).get(0).getColName()
-            .equals(CarbonCommonConstants.DEFAULT_INVISIBLE_DUMMY_MEASURE)) {
-          queryMeasure.setMeasure(carbonTable.getMeasureByTableName(tableName).get(0));
-        } else {
-          CarbonMeasure dummyMeasure = new CarbonMeasure(
-              carbonTable.getDimensionByTableName(tableName).get(0).getColumnSchema(), 0);
-          queryMeasure.setMeasure(dummyMeasure);
-        }
-      } else {
-        queryMeasure
-            .setMeasure(carbonTable.getMeasureByName(tableName, queryMeasure.getColumnName()));
-      }
-    }
-  }
-
-  /**
-   * Below method will be used to get the index of number type aggregator
-   *
-   * @param aggType aggregation type of each measure
-   * @return indexes of the sum/average aggregators
-   */
-  public static int[] getNumberTypeIndex(List<String> aggType) {
-    List<Integer> indexList = new ArrayList<Integer>();
-    for (int i = 0; i < aggType.size(); i++) {
-      if (CarbonCommonConstants.SUM.equals(aggType.get(i)) || CarbonCommonConstants.AVERAGE
-          .equals(aggType.get(i))) {
-        indexList.add(i);
-      }
-    }
-    return ArrayUtils.toPrimitive(indexList.toArray(new Integer[indexList.size()]));
-  }
-
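
A quick trace of this partitioning and of its complement getActualTypeIndex below, using literal "sum"/"avg" strings as stand-ins for the CarbonCommonConstants values (the actual constant values are not shown in this diff):

    import java.util.*;

    public class AggTypeIndexDemo {
      public static void main(String[] args) {
        List<String> aggType = List.of("sum", "max", "avg", "count");
        List<Integer> numberType = new ArrayList<>();
        List<Integer> actualType = new ArrayList<>();
        for (int i = 0; i < aggType.size(); i++) {
          if ("sum".equals(aggType.get(i)) || "avg".equals(aggType.get(i))) {
            numberType.add(i);   // sum/avg positions
          } else {
            actualType.add(i);   // everything else
          }
        }
        System.out.println(numberType); // [0, 2]
        System.out.println(actualType); // [1, 3]
      }
    }
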
-  /**
-   * Below method will be used to get the actual type aggregator indexes
-   *
-   * @param aggType aggregation type of each measure
-   * @return indexes of the non sum/average aggregators
-   */
-  public static int[] getActualTypeIndex(List<String> aggType) {
-    List<Integer> indexList = new ArrayList<Integer>();
-    for (int i = 0; i < aggType.size(); i++) {
-      if (!CarbonCommonConstants.SUM.equals(aggType.get(i)) && !CarbonCommonConstants.AVERAGE
-          .equals(aggType.get(i))) {
-        indexList.add(i);
-      }
-    }
-    return ArrayUtils.toPrimitive(indexList.toArray(new Integer[indexList.size()]));
-  }
-
-  /**
-   * Below method will be used to get the key structure for the column group
-   *
-   * @param segmentProperties      segment properties
-   * @param dimColumnEvaluatorInfo dimension evaluator info
-   * @return key structure info for column group dimension
-   * @throws KeyGenException
-   */
-  public static KeyStructureInfo getKeyStructureInfo(SegmentProperties segmentProperties,
-      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo) throws KeyGenException {
-    int colGrpId = getColumnGroupId(segmentProperties, dimColumnEvaluatorInfo.getColumnIndex());
-    KeyGenerator keyGenerator = segmentProperties.getColumnGroupAndItsKeygenartor().get(colGrpId);
-    List<Integer> mdKeyOrdinal = new ArrayList<Integer>();
-
-    mdKeyOrdinal.add(segmentProperties
-        .getColumnGroupMdKeyOrdinal(colGrpId, dimColumnEvaluatorInfo.getColumnIndex()));
-    int[] maskByteRanges = QueryUtil.getMaskedByteRangeBasedOrdinal(mdKeyOrdinal, keyGenerator);
-    byte[] maxKey = QueryUtil.getMaxKeyBasedOnOrinal(mdKeyOrdinal, keyGenerator);
-    int[] maksedByte = QueryUtil.getMaskedByte(keyGenerator.getKeySizeInBytes(), maskByteRanges);
-    KeyStructureInfo restructureInfos = new KeyStructureInfo();
-    restructureInfos.setKeyGenerator(keyGenerator);
-    restructureInfos.setMaskByteRanges(maskByteRanges);
-    restructureInfos.setMaxKey(maxKey);
-    restructureInfos.setMaskedBytes(maksedByte);
-    return restructureInfos;
-  }
-
-  /**
-   * Below method will be used to get the column group id based on the ordinal
-   *
-   * @param segmentProperties segment properties
-   * @param ordinal           ordinal to be searched
-   * @return column group id
-   */
-  public static int getColumnGroupId(SegmentProperties segmentProperties, int ordinal) {
-    int[][] columnGroups = segmentProperties.getColumnGroups();
-    int colGrpId = -1;
-    for (int i = 0; i < columnGroups.length; i++) {
-      if (columnGroups[i].length > 1) {
-        colGrpId++;
-        if (QueryUtil.searchInArray(columnGroups[i], ordinal)) {
-          break;
-        }
-      }
-    }
-    return colGrpId;
-  }
-
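
A standalone trace of the group-id counting above: only groups with more than one column advance the id, so columnar (single-column) entries are skipped (the columnGroups layout is hypothetical):

    public class ColumnGroupIdDemo {
      public static void main(String[] args) {
        int[][] columnGroups = {{0}, {1, 2}, {3}, {4, 5}};
        int ordinal = 4;
        int colGrpId = -1;
        for (int i = 0; i < columnGroups.length; i++) {
          if (columnGroups[i].length > 1) {
            colGrpId++;                       // count only multi-column groups
            boolean found = false;
            for (int member : columnGroups[i]) {
              if (member == ordinal) { found = true; break; }
            }
            if (found) break;
          }
        }
        System.out.println(colGrpId); // 1 - ordinal 4 is in the second multi-column group
      }
    }
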
-  /**
-   * Below method will be used to get the map of complex dimension to its
-   * query type, which is used during query execution to assemble complex
-   * column values
-   *
-   * @param queryDimensions          complex dimension in query
-   * @param dimensionToBlockIndexMap dimension to block index in file map
-   * @return complex dimension and query type
-   */
-  public static Map<Integer, GenericQueryType> getComplexDimensionsMap(
-      List<QueryDimension> queryDimensions, Map<Integer, Integer> dimensionToBlockIndexMap,
-      int[] eachComplexColumnValueSize, Map<String, Dictionary> columnIdToDictionaryMap,
-      Set<CarbonDimension> filterDimensions) {
-    Map<Integer, GenericQueryType> complexTypeMap = new HashMap<Integer, GenericQueryType>();
-    for (QueryDimension dimension : queryDimensions) {
-      CarbonDimension actualDimension = dimension.getDimension();
-      if (actualDimension.getNumberOfChild() == 0) {
-        continue;
-      }
-      fillParentDetails(dimensionToBlockIndexMap, actualDimension, complexTypeMap,
-          eachComplexColumnValueSize, columnIdToDictionaryMap);
-    }
-    if (null != filterDimensions) {
-      for (CarbonDimension filterDimension : filterDimensions) {
-        fillParentDetails(dimensionToBlockIndexMap, filterDimension, complexTypeMap,
-            eachComplexColumnValueSize, columnIdToDictionaryMap);
-      }
-    }
-    return complexTypeMap;
-  }
-
-  private static GenericQueryType fillParentDetails(Map<Integer, Integer> dimensionToBlockIndexMap,
-      CarbonDimension dimension, Map<Integer, GenericQueryType> complexTypeMap,
-      int[] eachComplexColumnValueSize, Map<String, Dictionary> columnIdToDictionaryMap) {
-    int parentBlockIndex = dimensionToBlockIndexMap.get(dimension.getOrdinal());
-    GenericQueryType parentQueryType = dimension.getDataType().equals(DataType.ARRAY) ?
-        new ArrayQueryType(dimension.getColName(), dimension.getColName(), parentBlockIndex) :
-        new StructQueryType(dimension.getColName(), dimension.getColName(),
-            dimensionToBlockIndexMap.get(dimension.getOrdinal()));
-    complexTypeMap.put(dimension.getOrdinal(), parentQueryType);
-    parentBlockIndex =
-        fillChildrenDetails(eachComplexColumnValueSize, columnIdToDictionaryMap, parentBlockIndex,
-            dimension, parentQueryType);
-    return parentQueryType;
-  }
-
-  private static int fillChildrenDetails(int[] eachComplexColumnValueSize,
-      Map<String, Dictionary> columnIdToDictionaryMap, int parentBlockIndex,
-      CarbonDimension dimension, GenericQueryType parentQueryType) {
-    for (int i = 0; i < dimension.getNumberOfChild(); i++) {
-      switch (dimension.getListOfChildDimensions().get(i).getDataType()) {
-        case ARRAY:
-          parentQueryType.addChildren(
-              new ArrayQueryType(dimension.getListOfChildDimensions().get(i).getColName(),
-                  dimension.getColName(), ++parentBlockIndex));
-          break;
-        case STRUCT:
-          parentQueryType.addChildren(
-              new StructQueryType(dimension.getListOfChildDimensions().get(i).getColName(),
-                  dimension.getColName(), ++parentBlockIndex));
-          break;
-        default:
-          boolean isDirectDictionary = CarbonUtil
-              .hasEncoding(dimension.getListOfChildDimensions().get(i).getEncoder(),
-                  Encoding.DIRECT_DICTIONARY);
-          parentQueryType.addChildren(
-              new PrimitiveQueryType(dimension.getListOfChildDimensions().get(i).getColName(),
-                  dimension.getColName(), ++parentBlockIndex,
-                  dimension.getListOfChildDimensions().get(i).getDataType(),
-                  eachComplexColumnValueSize[dimension.getListOfChildDimensions().get(i)
-                      .getComplexTypeOrdinal()], columnIdToDictionaryMap
-                  .get(dimension.getListOfChildDimensions().get(i).getColumnId()),
-                  isDirectDictionary));
-      }
-      if (dimension.getListOfChildDimensions().get(i).getNumberOfChild() > 0) {
-        parentBlockIndex = fillChildrenDetails(eachComplexColumnValueSize, columnIdToDictionaryMap,
-            parentBlockIndex, dimension.getListOfChildDimensions().get(i), parentQueryType);
-      }
-    }
-    return parentBlockIndex;
-  }
-
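
A simplified sketch of the depth-first block numbering performed above, with a toy node type standing in for CarbonDimension (names and indexes are hypothetical; Java 16+ for the record):

    import java.util.*;

    public class ComplexBlockIndexDemo {
      // toy node: name plus children (stand-in for CarbonDimension)
      record Node(String name, List<Node> children) {}

      static int assign(Node node, int parentBlockIndex, Map<String, Integer> out) {
        for (Node child : node.children()) {
          out.put(child.name(), ++parentBlockIndex);  // child takes the next index
          parentBlockIndex = assign(child, parentBlockIndex, out);
        }
        return parentBlockIndex;
      }

      public static void main(String[] args) {
        Node arr = new Node("arr", List.of(
            new Node("arr.struct", List.of(
                new Node("arr.struct.a", List.of()),
                new Node("arr.struct.b", List.of())))));
        Map<String, Integer> out = new LinkedHashMap<>();
        out.put("arr", 3);  // parent already mapped to block 3
        assign(arr, 3, out);
        System.out.println(out); // {arr=3, arr.struct=4, arr.struct.a=5, arr.struct.b=6}
      }
    }
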
-  public static Set<CarbonDimension> getAllFilterDimensions(FilterResolverIntf filterResolverTree) {
-    Set<CarbonDimension> filterDimensions = new HashSet<CarbonDimension>();
-    if (null == filterResolverTree) {
-      return filterDimensions;
-    }
-    List<ColumnExpression> dimensionResolvedInfos = new ArrayList<ColumnExpression>();
-    Expression filterExpression = filterResolverTree.getFilterExpression();
-    addColumnDimensions(filterExpression, filterDimensions);
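-    // note: dimensionResolvedInfos is never populated here, so the loop below is a no-op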
-    for (ColumnExpression info : dimensionResolvedInfos) {
-      if (info.isDimension() && info.getDimension().getNumberOfChild() > 0) {
-        filterDimensions.add(info.getDimension());
-      }
-    }
-    return filterDimensions;
-
-  }
-
-  /**
-   * This method will check if a given expression contains a column expression
-   * recursively and add the dimension instance to the set which holds the dimension
-   * instances of the complex filter expressions.
-   *
-   * @param expression       filter expression to walk
-   * @param filterDimensions set collecting the dimension instances found
-   */
-  private static void addColumnDimensions(Expression expression,
-      Set<CarbonDimension> filterDimensions) {
-    if (null == expression) {
-      return;
-    }
-    if (expression instanceof ColumnExpression
-        && ((ColumnExpression) expression).isDimension()) {
-      filterDimensions.add(((ColumnExpression) expression).getDimension());
-      return;
-    }
-    for (Expression child : expression.getChildren()) {
-      addColumnDimensions(child, filterDimensions);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/executor/util/RestructureUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/util/RestructureUtil.java b/core/src/main/java/org/carbondata/scan/executor/util/RestructureUtil.java
deleted file mode 100644
index e1dafe1..0000000
--- a/core/src/main/java/org/carbondata/scan/executor/util/RestructureUtil.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.executor.util;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.scan.executor.infos.AggregatorInfo;
-import org.carbondata.scan.model.QueryDimension;
-import org.carbondata.scan.model.QueryMeasure;
-
-/**
- * Utility class for restructuring
- */
-public class RestructureUtil {
-
-  /**
-   * Below method will be used to get the updated query dimensions. After
-   * restructuring, some dimensions may not be present in older table blocks;
-   * in that case only those query dimensions that are present in the current
-   * table block are selected
-   *
-   * @param queryDimensions
-   * @param tableBlockDimensions
-   * @return list of query dimension which is present in the table block
-   */
-  public static List<QueryDimension> getUpdatedQueryDimension(List<QueryDimension> queryDimensions,
-      List<CarbonDimension> tableBlockDimensions, List<CarbonDimension> tableComplexDimension) {
-    List<QueryDimension> presentDimension =
-        new ArrayList<QueryDimension>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    // select only those query dimensions that are present in the table block
-    for (QueryDimension queryDimimension : queryDimensions) {
-      for (CarbonDimension tableDimension : tableBlockDimensions) {
-        if (tableDimension.equals(queryDimimension.getDimension())) {
-          presentDimension.add(queryDimimension);
-        }
-      }
-    }
-    for (QueryDimension queryDimimension : queryDimensions) {
-      for (CarbonDimension tableDimension : tableComplexDimension) {
-        if (tableDimension.equals(queryDimimension.getDimension())) {
-          presentDimension.add(queryDimimension);
-        }
-      }
-    }
-    return presentDimension;
-  }
-
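
A simplified sketch of this filtering with plain strings standing in for dimension objects (column names are hypothetical):

    import java.util.*;

    public class RestructureDemo {
      public static void main(String[] args) {
        List<String> queryDims = List.of("name", "city", "newCol"); // requested columns
        List<String> blockDims = List.of("name", "city");           // present in this older block
        List<String> present = new ArrayList<>();
        for (String q : queryDims) {
          if (blockDims.contains(q)) {
            present.add(q);  // "newCol" was added after this block was written; skip it
          }
        }
        System.out.println(present); // [name, city]
      }
    }
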
-  /**
-   * Below method adds the dimension children for a complex type dimension.
-   * Internally a dimension column is created for each part of a complex
-   * dimension, so when a complex dimension is requested in a query, its
-   * children, which are hidden from the user, must be added as well. For
-   * example, a complex dimension of Array of String[2] is stored as 3
-   * dimensions, so when the user queries the array type, its children are
-   * added and the respective blocks are read to create a tuple based on all
-   * three dimensions
-   *
-   * @param queryDimensions      current query dimensions
-   * @param tableBlockDimensions dimensions which is present in the table block
-   * @return updated dimension(after adding complex type children)
-   */
-  public static List<CarbonDimension> addChildrenForComplexTypeDimension(
-      List<CarbonDimension> queryDimensions, List<CarbonDimension> tableBlockDimensions) {
-    List<CarbonDimension> updatedQueryDimension = new ArrayList<CarbonDimension>();
-    int numberOfChildren = 0;
-    for (CarbonDimension queryDimension : queryDimensions) {
-      // if the number of children is zero, it is not a complex dimension,
-      // so add it to the query dimensions directly
-      if (queryDimension.numberOfChild() == 0) {
-        updatedQueryDimension.add(queryDimension);
-      }
-      // for complex dimensions, copy numberOfChild entries starting at the parent ordinal
-      numberOfChildren = queryDimension.getOrdinal() + queryDimension.numberOfChild();
-      for (int j = queryDimension.getOrdinal(); j < numberOfChildren; j++) {
-        updatedQueryDimension.add(tableBlockDimensions.get(j));
-      }
-    }
-    return updatedQueryDimension;
-  }
-
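
A literal trace of the expansion loop above: it copies numberOfChild entries from the block dimension list starting at the parent's ordinal (the dimension names are hypothetical):

    import java.util.*;

    public class ComplexChildrenDemo {
      public static void main(String[] args) {
        List<String> tableBlockDimensions =
            List.of("d0", "d1", "arrayCol", "arrayCol.el0", "arrayCol.el1");
        int ordinal = 2;        // parent's position in the block
        int numberOfChild = 2;  // reported child count of the complex dimension
        List<String> updated = new ArrayList<>();
        for (int j = ordinal; j < ordinal + numberOfChild; j++) {
          updated.add(tableBlockDimensions.get(j));
        }
        System.out.println(updated); // [arrayCol, arrayCol.el0]
      }
    }
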
-  /**
-   * Below method will be used to get the aggregator info object
-   * in this method some of the properties which will be extracted
-   * from query measure and current block measures will be set
-   *
-   * @param queryMeasures        measures present in query
-   * @param currentBlockMeasures current block measures
-   * @return aggregator info
-   */
-  public static AggregatorInfo getAggregatorInfos(List<QueryMeasure> queryMeasures,
-      List<CarbonMeasure> currentBlockMeasures) {
-    AggregatorInfo aggregatorInfos = new AggregatorInfo();
-    int numberOfMeasureInQuery = queryMeasures.size();
-    int[] measureOrdinals = new int[numberOfMeasureInQuery];
-    Object[] defaultValues = new Object[numberOfMeasureInQuery];
-    boolean[] measureExistsInCurrentBlock = new boolean[numberOfMeasureInQuery];
-    int index = 0;
-    for (QueryMeasure queryMeasure : queryMeasures) {
-      measureOrdinals[index] = queryMeasure.getMeasure().getOrdinal();
-      // if the query measure exists in the current block measures,
-      // mark it as present; otherwise record the measure's default value
-      if (currentBlockMeasures.contains(queryMeasure.getMeasure())) {
-        measureExistsInCurrentBlock[index] = true;
-      } else {
-        defaultValues[index] = queryMeasure.getMeasure().getDefaultValue();
-      }
-      index++;
-    }
-    aggregatorInfos.setDefaultValues(defaultValues);
-    aggregatorInfos.setMeasureOrdinals(measureOrdinals);
-    aggregatorInfos.setMeasureExists(measureExistsInCurrentBlock);
-    return aggregatorInfos;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/BinaryExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/BinaryExpression.java b/core/src/main/java/org/carbondata/scan/expression/BinaryExpression.java
deleted file mode 100644
index 1ad334e..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/BinaryExpression.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression;
-
-public abstract class BinaryExpression extends Expression {
-
-  /**
-   * serialization version id
-   */
-  private static final long serialVersionUID = 1L;
-
-  protected Expression left;
-  protected Expression right;
-  protected boolean isRangeExpression;
-
-  public BinaryExpression(Expression left, Expression right) {
-    this.left = left;
-    this.right = right;
-    children.add(left);
-    children.add(right);
-  }
-
-  public Expression getLeft() {
-    return left;
-  }
-
-  public Expression getRight() {
-    return right;
-  }
-
-  public boolean isRangeExpression() {
-    return isRangeExpression;
-  }
-
-  public void setRangeExpression(boolean isRangeExpression) {
-    this.isRangeExpression = isRangeExpression;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/ColumnExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/ColumnExpression.java b/core/src/main/java/org/carbondata/scan/expression/ColumnExpression.java
deleted file mode 100644
index 7263f94..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/ColumnExpression.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonColumn;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public class ColumnExpression extends LeafExpression {
-
-  private static final long serialVersionUID = 1L;
-
-  private String columnName;
-
-  private boolean isDimension;
-
-  private int colIndex = -1;
-
-  private DataType dataType;
-
-  private CarbonDimension dimension;
-
-  private CarbonColumn carbonColumn;
-
-  public ColumnExpression(String columnName, DataType dataType) {
-    this.columnName = columnName;
-    this.dataType = dataType;
-
-  }
-
-  public CarbonDimension getDimension() {
-    return dimension;
-  }
-
-  public void setDimension(CarbonDimension dimension) {
-    this.dimension = dimension;
-  }
-
-  public String getColumnName() {
-    return columnName;
-  }
-
-  public void setColumnName(String columnName) {
-    this.columnName = columnName;
-  }
-
-  public boolean isDimension() {
-    return isDimension;
-  }
-
-  public void setDimension(boolean isDimension) {
-    this.isDimension = isDimension;
-  }
-
-  public int getColIndex() {
-    return colIndex;
-  }
-
-  public void setColIndex(int colIndex) {
-    this.colIndex = colIndex;
-  }
-
-  public DataType getDataType() {
-    return dataType;
-  }
-
-  public void setDataType(DataType dataType) {
-    this.dataType = dataType;
-  }
-
-  @Override public ExpressionResult evaluate(RowIntf value) {
-    ExpressionResult expressionResult =
-        new ExpressionResult(dataType, (null == value ? null : value.getVal(colIndex)));
-    return expressionResult;
-  }
-
-  @Override public ExpressionType getFilterExpressionType() {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override public String getString() {
-    return "ColumnExpression(" + columnName + ')';
-  }
-
-  public CarbonColumn getCarbonColumn() {
-    return carbonColumn;
-  }
-
-  public void setCarbonColumn(CarbonColumn carbonColumn) {
-    this.carbonColumn = carbonColumn;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/expression/Expression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/expression/Expression.java b/core/src/main/java/org/carbondata/scan/expression/Expression.java
deleted file mode 100644
index 31af903..0000000
--- a/core/src/main/java/org/carbondata/scan/expression/Expression.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.scan.expression;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.intf.ExpressionType;
-import org.carbondata.scan.filter.intf.RowIntf;
-
-public abstract class Expression implements Serializable {
-
-  private static final long serialVersionUID = -7568676723039530713L;
-  protected List<Expression> children =
-      new ArrayList<Expression>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-
-  public abstract ExpressionResult evaluate(RowIntf value)
-      throws FilterUnsupportedException, FilterIllegalMemberException;
-
-  public abstract ExpressionType getFilterExpressionType();
-
-  public List<Expression> getChildren() {
-    return children;
-  }
-
-  public abstract String getString();
-
-  // public abstract void  accept(ExpressionVisitor visitor);
-}


[52/52] incubator-carbondata git commit: [CARBONDATA-142]Renamed packages to org.apache.carbondata This closes #74

Posted by ch...@apache.org.
[CARBONDATA-142]Renamed packages to org.apache.carbondata This closes #74


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/6a2c504f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/6a2c504f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/6a2c504f

Branch: refs/heads/master
Commit: 6a2c504fd06ee21ac371c5aa1956b156a159be67
Parents: d545910 cd6a4ff
Author: chenliang613 <ch...@apache.org>
Authored: Mon Aug 15 15:08:01 2016 +0800
Committer: chenliang613 <ch...@apache.org>
Committed: Mon Aug 15 15:08:01 2016 +0800

----------------------------------------------------------------------
 .../carbondata/common/CarbonIterator.java       |   38 +
 .../carbondata/common/logging/LogService.java   |   47 +
 .../common/logging/LogServiceFactory.java       |   46 +
 .../impl/AuditExtendedRollingFileAppender.java  |   41 +
 .../common/logging/impl/AuditLevel.java         |   61 +
 .../impl/ExtendedRollingFileAppender.java       |  239 +
 .../common/logging/impl/FileUtil.java           |   96 +
 .../common/logging/impl/StandardLogService.java |  317 +
 .../common/logging/impl/StatisticLevel.java     |   64 +
 .../org/carbondata/common/CarbonIterator.java   |   38 -
 .../carbondata/common/logging/LogService.java   |   47 -
 .../common/logging/LogServiceFactory.java       |   46 -
 .../impl/AuditExtendedRollingFileAppender.java  |   41 -
 .../common/logging/impl/AuditLevel.java         |   61 -
 .../impl/ExtendedRollingFileAppender.java       |  239 -
 .../common/logging/impl/FileUtil.java           |   96 -
 .../common/logging/impl/StandardLogService.java |  317 -
 .../common/logging/impl/StatisticLevel.java     |   64 -
 common/src/test/java/log4j.properties           |    4 +-
 .../logging/LogServiceFactoryTest_UT.java       |   42 +
 .../logging/ft/LoggingServiceTest_FT.java       |   92 +
 ...AuditExtendedRollingFileAppenderTest_UT.java |   75 +
 .../common/logging/impl/AuditLevelTest_UT.java  |   48 +
 .../ExtendedRollingFileAppenderTest_UT.java     |   71 +
 .../common/logging/impl/FileUtilTest_UT.java    |   62 +
 .../logging/impl/StandardLogServiceTest_UT.java |  157 +
 .../logging/LogServiceFactoryTest_UT.java       |   42 -
 .../logging/ft/LoggingServiceTest_FT.java       |   92 -
 ...AuditExtendedRollingFileAppenderTest_UT.java |   78 -
 .../common/logging/impl/AuditLevelTest_UT.java  |   50 -
 .../ExtendedRollingFileAppenderTest_UT.java     |   74 -
 .../common/logging/impl/FileUtilTest_UT.java    |   62 -
 .../logging/impl/StandardLogServiceTest_UT.java |  159 -
 .../common/ext/ColumnUniqueIdGenerator.java     |   41 +
 .../common/ext/DictionaryFactory.java           |  119 +
 .../carbondata/common/ext/PathFactory.java      |   48 +
 .../common/factory/CarbonCommonFactory.java     |   54 +
 .../org/apache/carbondata/core/cache/Cache.java |   71 +
 .../carbondata/core/cache/CacheProvider.java    |  154 +
 .../apache/carbondata/core/cache/CacheType.java |   62 +
 .../apache/carbondata/core/cache/Cacheable.java |   50 +
 .../carbondata/core/cache/CarbonLRUCache.java   |  251 +
 .../AbstractColumnDictionaryInfo.java           |  279 +
 .../dictionary/AbstractDictionaryCache.java     |  297 +
 .../cache/dictionary/ColumnDictionaryInfo.java  |  283 +
 .../dictionary/ColumnReverseDictionaryInfo.java |  116 +
 .../core/cache/dictionary/Dictionary.java       |  100 +
 .../dictionary/DictionaryByteArrayWrapper.java  |   94 +
 .../cache/dictionary/DictionaryCacheLoader.java |   45 +
 .../dictionary/DictionaryCacheLoaderImpl.java   |  142 +
 .../dictionary/DictionaryChunksWrapper.java     |  127 +
 .../DictionaryColumnUniqueIdentifier.java       |  113 +
 .../core/cache/dictionary/DictionaryInfo.java   |   91 +
 .../cache/dictionary/ForwardDictionary.java     |  153 +
 .../dictionary/ForwardDictionaryCache.java      |  210 +
 .../cache/dictionary/ReverseDictionary.java     |  129 +
 .../dictionary/ReverseDictionaryCache.java      |  211 +
 .../core/carbon/AbsoluteTableIdentifier.java    |  111 +
 .../core/carbon/CarbonDataLoadSchema.java       |  207 +
 .../core/carbon/CarbonTableIdentifier.java      |  131 +
 .../core/carbon/ColumnIdentifier.java           |  113 +
 .../core/carbon/datastore/BTreeBuilderInfo.java |   61 +
 .../core/carbon/datastore/BlockIndexStore.java  |  309 +
 .../core/carbon/datastore/BtreeBuilder.java     |   38 +
 .../core/carbon/datastore/DataRefNode.java      |  105 +
 .../carbon/datastore/DataRefNodeFinder.java     |   45 +
 .../core/carbon/datastore/IndexKey.java         |   62 +
 .../carbon/datastore/SegmentTaskIndexStore.java |  334 +
 .../carbon/datastore/block/AbstractIndex.java   |   70 +
 .../core/carbon/datastore/block/BlockIndex.java |   53 +
 .../carbon/datastore/block/Distributable.java   |   25 +
 .../datastore/block/SegmentProperties.java      |  748 ++
 .../datastore/block/SegmentTaskIndex.java       |   58 +
 .../carbon/datastore/block/TableBlockInfo.java  |  204 +
 .../carbon/datastore/block/TableTaskInfo.java   |  114 +
 .../carbon/datastore/block/TaskBlockInfo.java   |   68 +
 .../chunk/DimensionChunkAttributes.java         |  102 +
 .../chunk/DimensionColumnDataChunk.java         |   71 +
 .../datastore/chunk/MeasureColumnDataChunk.java |   71 +
 .../impl/ColumnGroupDimensionDataChunk.java     |  128 +
 .../impl/FixedLengthDimensionDataChunk.java     |  123 +
 .../impl/VariableLengthDimensionDataChunk.java  |  114 +
 .../reader/DimensionColumnChunkReader.java      |   48 +
 .../chunk/reader/MeasureColumnChunkReader.java  |   47 +
 .../reader/dimension/AbstractChunkReader.java   |  143 +
 ...CompressedDimensionChunkFileBasedReader.java |  135 +
 .../measure/AbstractMeasureChunkReader.java     |   75 +
 .../CompressedMeasureChunkFileBasedReader.java  |   92 +
 .../exception/IndexBuilderException.java        |   96 +
 .../impl/btree/AbstractBTreeBuilder.java        |  165 +
 .../impl/btree/AbstractBTreeLeafNode.java       |  221 +
 .../impl/btree/BTreeDataRefNodeFinder.java      |  264 +
 .../carbon/datastore/impl/btree/BTreeNode.java  |   71 +
 .../datastore/impl/btree/BTreeNonLeafNode.java  |  232 +
 .../datastore/impl/btree/BlockBTreeBuilder.java |  118 +
 .../impl/btree/BlockBTreeLeafNode.java          |   64 +
 .../impl/btree/BlockletBTreeBuilder.java        |  104 +
 .../impl/btree/BlockletBTreeLeafNode.java       |  132 +
 .../core/carbon/metadata/CarbonMetadata.java    |  165 +
 .../carbon/metadata/blocklet/BlockletInfo.java  |  114 +
 .../metadata/blocklet/DataFileFooter.java       |  170 +
 .../carbon/metadata/blocklet/SegmentInfo.java   |   72 +
 .../compressor/ChunkCompressorMeta.java         |   92 +
 .../blocklet/compressor/CompressionCodec.java   |   31 +
 .../metadata/blocklet/datachunk/DataChunk.java  |  327 +
 .../blocklet/datachunk/PresenceMeta.java        |   66 +
 .../blocklet/index/BlockletBTreeIndex.java      |   76 +
 .../metadata/blocklet/index/BlockletIndex.java  |   77 +
 .../blocklet/index/BlockletMinMaxIndex.java     |   83 +
 .../metadata/blocklet/sort/SortState.java       |   38 +
 .../metadata/converter/SchemaConverter.java     |  105 +
 .../ThriftWrapperSchemaConverterImpl.java       |  386 +
 .../carbon/metadata/datatype/ConvertedType.java |  122 +
 .../core/carbon/metadata/datatype/DataType.java |   48 +
 .../core/carbon/metadata/encoder/Encoding.java  |   31 +
 .../carbon/metadata/index/BlockIndexInfo.java   |   92 +
 .../carbon/metadata/schema/SchemaEvolution.java |   52 +
 .../metadata/schema/SchemaEvolutionEntry.java   |   93 +
 .../metadata/schema/table/CarbonTable.java      |  393 +
 .../carbon/metadata/schema/table/TableInfo.java |  239 +
 .../metadata/schema/table/TableSchema.java      |  185 +
 .../schema/table/column/CarbonColumn.java       |  174 +
 .../schema/table/column/CarbonDimension.java    |  154 +
 .../schema/table/column/CarbonMeasure.java      |  112 +
 .../schema/table/column/ColumnSchema.java       |  418 +
 .../carbon/path/CarbonSharedDictionaryPath.java |   73 +
 .../core/carbon/path/CarbonStorePath.java       |   69 +
 .../core/carbon/path/CarbonTablePath.java       |  425 +
 .../carbon/querystatistics/QueryStatistic.java  |   85 +
 .../QueryStatisticsRecorder.java                |   74 +
 .../core/constants/CarbonCommonConstants.java   |  892 ++
 .../core/constants/IgnoreDictionary.java        |   52 +
 .../core/datastorage/store/FileHolder.java      |   87 +
 .../datastorage/store/MeasureDataWrapper.java   |   30 +
 .../core/datastorage/store/NodeKeyStore.java    |   64 +
 .../datastorage/store/NodeMeasureDataStore.java |   41 +
 .../columnar/BlockIndexerStorageForInt.java     |  226 +
 .../BlockIndexerStorageForNoInvertedIndex.java  |  159 +
 .../store/columnar/ColumnGroupModel.java        |  116 +
 .../store/columnar/ColumnWithIntIndex.java      |   82 +
 .../columnar/ColumnWithIntIndexForHighCard.java |   49 +
 .../store/columnar/ColumnarKeyStore.java        |   47 +
 .../columnar/ColumnarKeyStoreDataHolder.java    |   97 +
 .../store/columnar/ColumnarKeyStoreInfo.java    |  262 +
 .../columnar/ColumnarKeyStoreMetadata.java      |  150 +
 .../store/columnar/IndexStorage.java            |   44 +
 .../store/columnar/UnBlockIndexer.java          |   78 +
 .../store/compression/Compressor.java           |   28 +
 .../store/compression/MeasureMetaDataModel.java |  217 +
 .../store/compression/SnappyCompression.java    |  273 +
 .../compression/ValueCompressionModel.java      |  236 +
 .../compression/ValueCompressonHolder.java      |  135 +
 .../compression/type/UnCompressByteArray.java   |  137 +
 .../compression/type/UnCompressDefaultLong.java |   51 +
 .../compression/type/UnCompressMaxMinByte.java  |  107 +
 .../type/UnCompressMaxMinByteForLong.java       |   78 +
 .../type/UnCompressMaxMinDefault.java           |  108 +
 .../type/UnCompressMaxMinDefaultLong.java       |   75 +
 .../compression/type/UnCompressMaxMinFloat.java |  107 +
 .../compression/type/UnCompressMaxMinInt.java   |  105 +
 .../compression/type/UnCompressMaxMinLong.java  |  105 +
 .../compression/type/UnCompressMaxMinShort.java |  106 +
 .../type/UnCompressNonDecimalByte.java          |   97 +
 .../type/UnCompressNonDecimalDefault.java       |   97 +
 .../type/UnCompressNonDecimalFloat.java         |  101 +
 .../type/UnCompressNonDecimalInt.java           |   98 +
 .../type/UnCompressNonDecimalLong.java          |  100 +
 .../type/UnCompressNonDecimalMaxMinByte.java    |  108 +
 .../type/UnCompressNonDecimalMaxMinDefault.java |  106 +
 .../type/UnCompressNonDecimalMaxMinFloat.java   |  108 +
 .../type/UnCompressNonDecimalMaxMinInt.java     |  108 +
 .../type/UnCompressNonDecimalMaxMinLong.java    |  110 +
 .../type/UnCompressNonDecimalMaxMinShort.java   |  108 +
 .../type/UnCompressNonDecimalShort.java         |   99 +
 .../compression/type/UnCompressNoneByte.java    |  100 +
 .../compression/type/UnCompressNoneDefault.java |   96 +
 .../compression/type/UnCompressNoneFloat.java   |  101 +
 .../compression/type/UnCompressNoneInt.java     |  101 +
 .../compression/type/UnCompressNoneLong.java    |  101 +
 .../compression/type/UnCompressNoneShort.java   |  104 +
 .../store/dataholder/CarbonReadDataHolder.java  |  115 +
 .../store/dataholder/CarbonWriteDataHolder.java |  188 +
 .../store/filesystem/AbstractDFSCarbonFile.java |  217 +
 .../store/filesystem/CarbonFile.java            |   66 +
 .../store/filesystem/CarbonFileFilter.java      |   24 +
 .../store/filesystem/HDFSCarbonFile.java        |  128 +
 .../store/filesystem/LocalCarbonFile.java       |  226 +
 .../store/filesystem/ViewFSCarbonFile.java      |  126 +
 .../impl/CompressedDataMeasureDataWrapper.java  |   37 +
 .../store/impl/DFSFileHolderImpl.java           |  183 +
 .../datastorage/store/impl/FileFactory.java     |  477 +
 .../datastorage/store/impl/FileHolderImpl.java  |  221 +
 .../store/impl/MemoryMappedFileHolderImpl.java  |  118 +
 ...ractHeavyCompressedDoubleArrayDataStore.java |   94 +
 ...HeavyCompressedDoubleArrayDataFileStore.java |  110 +
 ...yCompressedDoubleArrayDataInMemoryStore.java |   76 +
 .../AbstractDoubleArrayDataStore.java           |   82 +
 .../uncompressed/DoubleArrayDataFileStore.java  |   86 +
 .../DoubleArrayDataInMemoryStore.java           |  163 +
 .../key/columnar/AbstractColumnarKeyStore.java  |  106 +
 .../CompressedColumnarFileKeyStore.java         |  168 +
 .../CompressedColumnarInMemoryStore.java        |  155 +
 .../CompressedColumnarKeyStoreUtil.java         |  108 +
 .../UnCompressedColumnarFileKeyStore.java       |   88 +
 .../UnCompressedColumnarInMemoryStore.java      |   70 +
 .../AbstractCompressedSingleArrayStore.java     |  119 +
 .../CompressedSingleArrayKeyFileStore.java      |   92 +
 .../CompressedSingleArrayKeyInMemoryStore.java  |   46 +
 .../AbstractSingleArrayKeyStore.java            |  107 +
 .../uncompressed/SingleArrayKeyFileStore.java   |  104 +
 .../SingleArrayKeyInMemoryStore.java            |   36 +
 .../core/datastorage/util/StoreFactory.java     |   62 +
 .../core/keygenerator/KeyGenException.java      |   45 +
 .../core/keygenerator/KeyGenerator.java         |  124 +
 .../keygenerator/columnar/ColumnarSplitter.java |  103 +
 .../MultiDimKeyVarLengthEquiSplitGenerator.java |  244 +
 ...tiDimKeyVarLengthVariableSplitGenerator.java |  239 +
 .../DirectDictionaryGenerator.java              |   56 +
 .../DirectDictionaryKeyGeneratorFactory.java    |   53 +
 .../TimeStampDirectDictionaryGenerator.java     |  215 +
 .../TimeStampGranularityConstants.java          |   54 +
 .../TimeStampGranularityTypeValue.java          |   63 +
 .../factory/KeyGeneratorFactory.java            |   56 +
 .../mdkey/AbstractKeyGenerator.java             |   79 +
 .../core/keygenerator/mdkey/Bits.java           |  327 +
 .../mdkey/MultiDimKeyVarLengthGenerator.java    |  117 +
 .../keygenerator/mdkey/NumberCompressor.java    |  220 +
 .../carbondata/core/load/BlockDetails.java      |   78 +
 .../core/load/LoadMetadataDetails.java          |  226 +
 .../carbondata/core/metadata/BlockletInfo.java  |  202 +
 .../core/metadata/BlockletInfoColumnar.java     |  405 +
 .../core/metadata/ValueEncoderMeta.java         |  104 +
 .../reader/CarbonDictionaryColumnMetaChunk.java |  107 +
 .../reader/CarbonDictionaryMetadataReader.java  |   58 +
 .../CarbonDictionaryMetadataReaderImpl.java     |  201 +
 .../core/reader/CarbonDictionaryReader.java     |   70 +
 .../core/reader/CarbonDictionaryReaderImpl.java |  314 +
 .../core/reader/CarbonFooterReader.java         |   78 +
 .../core/reader/CarbonIndexFileReader.java      |   95 +
 .../carbondata/core/reader/ThriftReader.java    |  146 +
 .../CarbonDictionarySortIndexReader.java        |   47 +
 .../CarbonDictionarySortIndexReaderImpl.java    |  228 +
 .../core/service/ColumnUniqueIdService.java     |   34 +
 .../core/service/DictionaryService.java         |   92 +
 .../carbondata/core/service/PathService.java    |   38 +
 .../apache/carbondata/core/util/ByteUtil.java   |  320 +
 .../core/util/CarbonFileFolderComparator.java   |   51 +
 .../core/util/CarbonLoadStatisticsDummy.java    |  104 +
 .../core/util/CarbonLoadStatisticsImpl.java     |  413 +
 .../carbondata/core/util/CarbonMergerUtil.java  |   49 +
 .../core/util/CarbonMetadataUtil.java           |  450 +
 .../carbondata/core/util/CarbonProperties.java  |  494 ++
 .../core/util/CarbonTimeStatisticsFactory.java  |   52 +
 .../apache/carbondata/core/util/CarbonUtil.java | 1428 +++
 .../core/util/CarbonUtilException.java          |   80 +
 .../core/util/DataFileFooterConverter.java      |  475 +
 .../carbondata/core/util/DataTypeUtil.java      |  410 +
 .../carbondata/core/util/LoadStatistics.java    |   63 +
 .../core/util/ValueCompressionUtil.java         | 1027 +++
 .../carbondata/core/writer/ByteArrayHolder.java |   77 +
 .../core/writer/CarbonDictionaryWriter.java     |   63 +
 .../core/writer/CarbonDictionaryWriterImpl.java |  422 +
 .../core/writer/CarbonFooterWriter.java         |   72 +
 .../core/writer/CarbonIndexFileWriter.java      |   64 +
 .../core/writer/HierarchyValueWriterForCSV.java |  320 +
 .../carbondata/core/writer/ThriftWriter.java    |  119 +
 .../exception/CarbonDataWriterException.java    |   81 +
 .../CarbonDictionarySortIndexWriter.java        |   48 +
 .../CarbonDictionarySortIndexWriterImpl.java    |  215 +
 .../sortindex/CarbonDictionarySortInfo.java     |   65 +
 .../CarbonDictionarySortInfoPreparator.java     |  150 +
 .../sortindex/CarbonDictionarySortModel.java    |  179 +
 .../scan/collector/ScannedResultCollector.java  |   38 +
 .../impl/AbstractScannedResultCollector.java    |  157 +
 .../impl/DictionaryBasedResultCollector.java    |  130 +
 .../collector/impl/RawBasedResultCollector.java |   67 +
 .../scan/complextypes/ArrayQueryType.java       |  158 +
 .../scan/complextypes/ComplexQueryType.java     |   80 +
 .../scan/complextypes/PrimitiveQueryType.java   |  175 +
 .../scan/complextypes/StructQueryType.java      |  184 +
 .../carbondata/scan/executor/QueryExecutor.java |   40 +
 .../scan/executor/QueryExecutorFactory.java     |   33 +
 .../exception/QueryExecutionException.java      |   96 +
 .../executor/impl/AbstractQueryExecutor.java    |  412 +
 .../scan/executor/impl/DetailQueryExecutor.java |   42 +
 .../executor/impl/QueryExecutorProperties.java  |   90 +
 .../scan/executor/infos/AggregatorInfo.java     |  149 +
 .../scan/executor/infos/BlockExecutionInfo.java |  681 ++
 .../scan/executor/infos/KeyStructureInfo.java   |  119 +
 .../scan/executor/infos/SortInfo.java           |  125 +
 .../scan/executor/util/QueryUtil.java           |  951 ++
 .../scan/executor/util/RestructureUtil.java     |  135 +
 .../scan/expression/BinaryExpression.java       |   59 +
 .../scan/expression/ColumnExpression.java       |  114 +
 .../carbondata/scan/expression/Expression.java  |   50 +
 .../scan/expression/ExpressionResult.java       |  472 +
 .../scan/expression/LeafExpression.java         |   24 +
 .../scan/expression/LiteralExpression.java      |   69 +
 .../scan/expression/UnaryExpression.java        |   33 +
 .../scan/expression/UnknownExpression.java      |   28 +
 .../expression/arithmetic/AddExpression.java    |   86 +
 .../arithmetic/BinaryArithmeticExpression.java  |   34 +
 .../expression/arithmetic/DivideExpression.java |   86 +
 .../arithmetic/MultiplyExpression.java          |   87 +
 .../arithmetic/SubstractExpression.java         |   88 +
 .../BinaryConditionalExpression.java            |   37 +
 .../conditional/ConditionalExpression.java      |   43 +
 .../conditional/EqualToExpression.java          |  108 +
 .../GreaterThanEqualToExpression.java           |   90 +
 .../conditional/GreaterThanExpression.java      |   93 +
 .../expression/conditional/InExpression.java    |   98 +
 .../conditional/LessThanEqualToExpression.java  |   92 +
 .../conditional/LessThanExpression.java         |   95 +
 .../expression/conditional/ListExpression.java  |   62 +
 .../conditional/NotEqualsExpression.java        |  104 +
 .../expression/conditional/NotInExpression.java |   97 +
 .../exception/FilterIllegalMemberException.java |   98 +
 .../exception/FilterUnsupportedException.java   |   92 +
 .../scan/expression/logical/AndExpression.java  |   63 +
 .../logical/BinaryLogicalExpression.java        |  127 +
 .../scan/expression/logical/NotExpression.java  |   60 +
 .../scan/expression/logical/OrExpression.java   |   62 +
 .../scan/filter/DimColumnFilterInfo.java        |   61 +
 .../scan/filter/FilterExpressionProcessor.java  |  352 +
 .../carbondata/scan/filter/FilterProcessor.java |   60 +
 .../carbondata/scan/filter/FilterUtil.java      | 1395 +++
 .../scan/filter/GenericQueryType.java           |   73 +
 .../filter/executer/AndFilterExecuterImpl.java  |   62 +
 .../executer/DimColumnExecuterFilterInfo.java   |   32 +
 .../ExcludeColGroupFilterExecuterImpl.java      |  137 +
 .../executer/ExcludeFilterExecuterImpl.java     |  188 +
 .../scan/filter/executer/FilterExecuter.java    |   45 +
 .../IncludeColGroupFilterExecuterImpl.java      |  209 +
 .../executer/IncludeFilterExecuterImpl.java     |  206 +
 .../filter/executer/OrFilterExecuterImpl.java   |   52 +
 .../executer/RestructureFilterExecuterImpl.java |   55 +
 .../executer/RowLevelFilterExecuterImpl.java    |  402 +
 .../RowLevelRangeGrtThanFiterExecuterImpl.java  |  209 +
 ...elRangeGrtrThanEquaToFilterExecuterImpl.java |  200 +
 ...velRangeLessThanEqualFilterExecuterImpl.java |  248 +
 .../RowLevelRangeLessThanFiterExecuterImpl.java |  252 +
 .../RowLevelRangeTypeExecuterFacory.java        |   93 +
 .../scan/filter/intf/ExpressionType.java        |   44 +
 .../scan/filter/intf/FilterExecuterType.java    |   28 +
 .../carbondata/scan/filter/intf/RowImpl.java    |   44 +
 .../carbondata/scan/filter/intf/RowIntf.java    |   31 +
 .../filter/resolver/AndFilterResolverImpl.java  |   56 +
 .../resolver/ConditionalFilterResolverImpl.java |  241 +
 .../filter/resolver/FilterResolverIntf.java     |  102 +
 .../resolver/LogicalFilterResolverImpl.java     |  118 +
 .../resolver/RestructureFilterResolverImpl.java |  206 +
 .../resolver/RowLevelFilterResolverImpl.java    |  141 +
 .../RowLevelRangeFilterResolverImpl.java        |  287 +
 .../metadata/FilterResolverMetadata.java        |   62 +
 .../DimColumnResolvedFilterInfo.java            |  194 +
 .../MeasureColumnResolvedFilterInfo.java        |  105 +
 .../visitable/ResolvedFilterInfoVisitable.java  |   38 +
 .../visitor/CustomTypeDictionaryVisitor.java    |  110 +
 .../visitor/DictionaryColumnVisitor.java        |   74 +
 .../visitor/FilterInfoTypeVisitorFactory.java   |   45 +
 .../visitor/NoDictionaryTypeVisitor.java        |   69 +
 .../visitor/ResolvedFilterInfoVisitorIntf.java  |   40 +
 .../carbondata/scan/model/CarbonQueryPlan.java  |  239 +
 .../carbondata/scan/model/QueryColumn.java      |  109 +
 .../carbondata/scan/model/QueryDimension.java   |   58 +
 .../carbondata/scan/model/QueryMeasure.java     |   61 +
 .../carbondata/scan/model/QueryModel.java       |  507 ++
 .../carbondata/scan/model/QuerySchemaInfo.java  |   86 +
 .../carbondata/scan/model/SortOrderType.java    |   57 +
 .../processor/AbstractDataBlockIterator.java    |  140 +
 .../scan/processor/BlockletIterator.java        |   88 +
 .../scan/processor/BlocksChunkHolder.java       |  125 +
 .../processor/impl/DataBlockIteratorImpl.java   |   63 +
 .../scan/result/AbstractScannedResult.java      |  437 +
 .../carbondata/scan/result/BatchResult.java     |  105 +
 .../apache/carbondata/scan/result/Result.java   |   70 +
 .../result/impl/FilterQueryScannedResult.java   |  147 +
 .../impl/NonFilterQueryScannedResult.java       |  146 +
 .../AbstractDetailQueryResultIterator.java      |  130 +
 .../scan/result/iterator/ChunkRowIterator.java  |   79 +
 .../iterator/DetailQueryResultIterator.java     |   88 +
 .../scan/result/iterator/RawResultIterator.java |  169 +
 .../scan/scanner/AbstractBlockletScanner.java   |   62 +
 .../scan/scanner/BlockletScanner.java           |   41 +
 .../scan/scanner/impl/FilterScanner.java        |  174 +
 .../scan/scanner/impl/NonFilterScanner.java     |   37 +
 .../scan/wrappers/ByteArrayWrapper.java         |  202 +
 .../common/ext/ColumnUniqueIdGenerator.java     |   41 -
 .../common/ext/DictionaryFactory.java           |  119 -
 .../org/carbondata/common/ext/PathFactory.java  |   48 -
 .../common/factory/CarbonCommonFactory.java     |   54 -
 .../java/org/carbondata/core/cache/Cache.java   |   71 -
 .../carbondata/core/cache/CacheProvider.java    |  154 -
 .../org/carbondata/core/cache/CacheType.java    |   62 -
 .../org/carbondata/core/cache/Cacheable.java    |   50 -
 .../carbondata/core/cache/CarbonLRUCache.java   |  251 -
 .../AbstractColumnDictionaryInfo.java           |  279 -
 .../dictionary/AbstractDictionaryCache.java     |  297 -
 .../cache/dictionary/ColumnDictionaryInfo.java  |  283 -
 .../dictionary/ColumnReverseDictionaryInfo.java |  116 -
 .../core/cache/dictionary/Dictionary.java       |  100 -
 .../dictionary/DictionaryByteArrayWrapper.java  |   94 -
 .../cache/dictionary/DictionaryCacheLoader.java |   45 -
 .../dictionary/DictionaryCacheLoaderImpl.java   |  142 -
 .../dictionary/DictionaryChunksWrapper.java     |  127 -
 .../DictionaryColumnUniqueIdentifier.java       |  113 -
 .../core/cache/dictionary/DictionaryInfo.java   |   91 -
 .../cache/dictionary/ForwardDictionary.java     |  153 -
 .../dictionary/ForwardDictionaryCache.java      |  210 -
 .../cache/dictionary/ReverseDictionary.java     |  129 -
 .../dictionary/ReverseDictionaryCache.java      |  211 -
 .../core/carbon/AbsoluteTableIdentifier.java    |  111 -
 .../core/carbon/CarbonDataLoadSchema.java       |  207 -
 .../core/carbon/CarbonTableIdentifier.java      |  131 -
 .../core/carbon/ColumnIdentifier.java           |  113 -
 .../core/carbon/datastore/BTreeBuilderInfo.java |   61 -
 .../core/carbon/datastore/BlockIndexStore.java  |  309 -
 .../core/carbon/datastore/BtreeBuilder.java     |   38 -
 .../core/carbon/datastore/DataRefNode.java      |  105 -
 .../carbon/datastore/DataRefNodeFinder.java     |   45 -
 .../core/carbon/datastore/IndexKey.java         |   62 -
 .../carbon/datastore/SegmentTaskIndexStore.java |  334 -
 .../carbon/datastore/block/AbstractIndex.java   |   70 -
 .../core/carbon/datastore/block/BlockIndex.java |   53 -
 .../carbon/datastore/block/Distributable.java   |   25 -
 .../datastore/block/SegmentProperties.java      |  748 --
 .../datastore/block/SegmentTaskIndex.java       |   58 -
 .../carbon/datastore/block/TableBlockInfo.java  |  204 -
 .../carbon/datastore/block/TableTaskInfo.java   |  114 -
 .../carbon/datastore/block/TaskBlockInfo.java   |   68 -
 .../chunk/DimensionChunkAttributes.java         |  102 -
 .../chunk/DimensionColumnDataChunk.java         |   71 -
 .../datastore/chunk/MeasureColumnDataChunk.java |   71 -
 .../impl/ColumnGroupDimensionDataChunk.java     |  128 -
 .../impl/FixedLengthDimensionDataChunk.java     |  123 -
 .../impl/VariableLengthDimensionDataChunk.java  |  114 -
 .../reader/DimensionColumnChunkReader.java      |   48 -
 .../chunk/reader/MeasureColumnChunkReader.java  |   47 -
 .../reader/dimension/AbstractChunkReader.java   |  143 -
 ...CompressedDimensionChunkFileBasedReader.java |  135 -
 .../measure/AbstractMeasureChunkReader.java     |   75 -
 .../CompressedMeasureChunkFileBasedReader.java  |   92 -
 .../exception/IndexBuilderException.java        |   96 -
 .../impl/btree/AbstractBTreeBuilder.java        |  165 -
 .../impl/btree/AbstractBTreeLeafNode.java       |  221 -
 .../impl/btree/BTreeDataRefNodeFinder.java      |  264 -
 .../carbon/datastore/impl/btree/BTreeNode.java  |   71 -
 .../datastore/impl/btree/BTreeNonLeafNode.java  |  232 -
 .../datastore/impl/btree/BlockBTreeBuilder.java |  118 -
 .../impl/btree/BlockBTreeLeafNode.java          |   64 -
 .../impl/btree/BlockletBTreeBuilder.java        |  104 -
 .../impl/btree/BlockletBTreeLeafNode.java       |  132 -
 .../core/carbon/metadata/CarbonMetadata.java    |  165 -
 .../carbon/metadata/blocklet/BlockletInfo.java  |  114 -
 .../metadata/blocklet/DataFileFooter.java       |  170 -
 .../carbon/metadata/blocklet/SegmentInfo.java   |   72 -
 .../compressor/ChunkCompressorMeta.java         |   92 -
 .../blocklet/compressor/CompressionCodec.java   |   31 -
 .../metadata/blocklet/datachunk/DataChunk.java  |  327 -
 .../blocklet/datachunk/PresenceMeta.java        |   66 -
 .../blocklet/index/BlockletBTreeIndex.java      |   76 -
 .../metadata/blocklet/index/BlockletIndex.java  |   77 -
 .../blocklet/index/BlockletMinMaxIndex.java     |   83 -
 .../metadata/blocklet/sort/SortState.java       |   38 -
 .../metadata/converter/SchemaConverter.java     |  105 -
 .../ThriftWrapperSchemaConverterImpl.java       |  382 -
 .../carbon/metadata/datatype/ConvertedType.java |  122 -
 .../core/carbon/metadata/datatype/DataType.java |   48 -
 .../core/carbon/metadata/encoder/Encoding.java  |   31 -
 .../carbon/metadata/index/BlockIndexInfo.java   |   92 -
 .../carbon/metadata/schema/SchemaEvolution.java |   52 -
 .../metadata/schema/SchemaEvolutionEntry.java   |   93 -
 .../metadata/schema/table/CarbonTable.java      |  399 -
 .../carbon/metadata/schema/table/TableInfo.java |  239 -
 .../metadata/schema/table/TableSchema.java      |  185 -
 .../schema/table/column/CarbonColumn.java       |  174 -
 .../schema/table/column/CarbonDimension.java    |  154 -
 .../schema/table/column/CarbonMeasure.java      |  112 -
 .../schema/table/column/ColumnSchema.java       |  418 -
 .../carbon/path/CarbonSharedDictionaryPath.java |   73 -
 .../core/carbon/path/CarbonStorePath.java       |   69 -
 .../core/carbon/path/CarbonTablePath.java       |  425 -
 .../carbon/querystatistics/QueryStatistic.java  |   85 -
 .../QueryStatisticsRecorder.java                |   74 -
 .../core/constants/CarbonCommonConstants.java   |  892 --
 .../core/constants/IgnoreDictionary.java        |   52 -
 .../core/datastorage/store/FileHolder.java      |   87 -
 .../datastorage/store/MeasureDataWrapper.java   |   30 -
 .../core/datastorage/store/NodeKeyStore.java    |   64 -
 .../datastorage/store/NodeMeasureDataStore.java |   41 -
 .../columnar/BlockIndexerStorageForInt.java     |  226 -
 .../BlockIndexerStorageForNoInvertedIndex.java  |  159 -
 .../store/columnar/ColumnGroupModel.java        |  116 -
 .../store/columnar/ColumnWithIntIndex.java      |   82 -
 .../columnar/ColumnWithIntIndexForHighCard.java |   49 -
 .../store/columnar/ColumnarKeyStore.java        |   47 -
 .../columnar/ColumnarKeyStoreDataHolder.java    |   97 -
 .../store/columnar/ColumnarKeyStoreInfo.java    |  262 -
 .../columnar/ColumnarKeyStoreMetadata.java      |  150 -
 .../store/columnar/IndexStorage.java            |   44 -
 .../store/columnar/UnBlockIndexer.java          |   78 -
 .../store/compression/Compressor.java           |   28 -
 .../store/compression/MeasureMetaDataModel.java |  217 -
 .../store/compression/SnappyCompression.java    |  273 -
 .../compression/ValueCompressionModel.java      |  236 -
 .../compression/ValueCompressonHolder.java      |  135 -
 .../compression/type/UnCompressByteArray.java   |  137 -
 .../compression/type/UnCompressDefaultLong.java |   51 -
 .../compression/type/UnCompressMaxMinByte.java  |  107 -
 .../type/UnCompressMaxMinByteForLong.java       |   78 -
 .../type/UnCompressMaxMinDefault.java           |  108 -
 .../type/UnCompressMaxMinDefaultLong.java       |   75 -
 .../compression/type/UnCompressMaxMinFloat.java |  107 -
 .../compression/type/UnCompressMaxMinInt.java   |  105 -
 .../compression/type/UnCompressMaxMinLong.java  |  105 -
 .../compression/type/UnCompressMaxMinShort.java |  106 -
 .../type/UnCompressNonDecimalByte.java          |   97 -
 .../type/UnCompressNonDecimalDefault.java       |   97 -
 .../type/UnCompressNonDecimalFloat.java         |  101 -
 .../type/UnCompressNonDecimalInt.java           |   98 -
 .../type/UnCompressNonDecimalLong.java          |  100 -
 .../type/UnCompressNonDecimalMaxMinByte.java    |  108 -
 .../type/UnCompressNonDecimalMaxMinDefault.java |  106 -
 .../type/UnCompressNonDecimalMaxMinFloat.java   |  108 -
 .../type/UnCompressNonDecimalMaxMinInt.java     |  108 -
 .../type/UnCompressNonDecimalMaxMinLong.java    |  110 -
 .../type/UnCompressNonDecimalMaxMinShort.java   |  108 -
 .../type/UnCompressNonDecimalShort.java         |   99 -
 .../compression/type/UnCompressNoneByte.java    |  100 -
 .../compression/type/UnCompressNoneDefault.java |   96 -
 .../compression/type/UnCompressNoneFloat.java   |  101 -
 .../compression/type/UnCompressNoneInt.java     |  101 -
 .../compression/type/UnCompressNoneLong.java    |  101 -
 .../compression/type/UnCompressNoneShort.java   |  104 -
 .../store/dataholder/CarbonReadDataHolder.java  |  115 -
 .../store/dataholder/CarbonWriteDataHolder.java |  188 -
 .../store/filesystem/AbstractDFSCarbonFile.java |  217 -
 .../store/filesystem/CarbonFile.java            |   66 -
 .../store/filesystem/CarbonFileFilter.java      |   24 -
 .../store/filesystem/HDFSCarbonFile.java        |  128 -
 .../store/filesystem/LocalCarbonFile.java       |  226 -
 .../store/filesystem/ViewFSCarbonFile.java      |  126 -
 .../impl/CompressedDataMeasureDataWrapper.java  |   37 -
 .../store/impl/DFSFileHolderImpl.java           |  183 -
 .../datastorage/store/impl/FileFactory.java     |  477 -
 .../datastorage/store/impl/FileHolderImpl.java  |  221 -
 .../store/impl/MemoryMappedFileHolderImpl.java  |  118 -
 ...ractHeavyCompressedDoubleArrayDataStore.java |   94 -
 ...HeavyCompressedDoubleArrayDataFileStore.java |  110 -
 ...yCompressedDoubleArrayDataInMemoryStore.java |   76 -
 .../AbstractDoubleArrayDataStore.java           |   82 -
 .../uncompressed/DoubleArrayDataFileStore.java  |   86 -
 .../DoubleArrayDataInMemoryStore.java           |  163 -
 .../key/columnar/AbstractColumnarKeyStore.java  |  106 -
 .../CompressedColumnarFileKeyStore.java         |  168 -
 .../CompressedColumnarInMemoryStore.java        |  155 -
 .../CompressedColumnarKeyStoreUtil.java         |  108 -
 .../UnCompressedColumnarFileKeyStore.java       |   88 -
 .../UnCompressedColumnarInMemoryStore.java      |   70 -
 .../AbstractCompressedSingleArrayStore.java     |  119 -
 .../CompressedSingleArrayKeyFileStore.java      |   92 -
 .../CompressedSingleArrayKeyInMemoryStore.java  |   46 -
 .../AbstractSingleArrayKeyStore.java            |  107 -
 .../uncompressed/SingleArrayKeyFileStore.java   |  104 -
 .../SingleArrayKeyInMemoryStore.java            |   36 -
 .../core/datastorage/util/StoreFactory.java     |   62 -
 .../core/keygenerator/KeyGenException.java      |   45 -
 .../core/keygenerator/KeyGenerator.java         |  124 -
 .../keygenerator/columnar/ColumnarSplitter.java |  103 -
 .../MultiDimKeyVarLengthEquiSplitGenerator.java |  244 -
 ...tiDimKeyVarLengthVariableSplitGenerator.java |  239 -
 .../DirectDictionaryGenerator.java              |   56 -
 .../DirectDictionaryKeyGeneratorFactory.java    |   53 -
 .../TimeStampDirectDictionaryGenerator.java     |  215 -
 .../TimeStampGranularityConstants.java          |   54 -
 .../TimeStampGranularityTypeValue.java          |   63 -
 .../factory/KeyGeneratorFactory.java            |   56 -
 .../mdkey/AbstractKeyGenerator.java             |   79 -
 .../core/keygenerator/mdkey/Bits.java           |  327 -
 .../mdkey/MultiDimKeyVarLengthGenerator.java    |  117 -
 .../keygenerator/mdkey/NumberCompressor.java    |  220 -
 .../org/carbondata/core/load/BlockDetails.java  |   78 -
 .../core/load/LoadMetadataDetails.java          |  226 -
 .../carbondata/core/metadata/BlockletInfo.java  |  202 -
 .../core/metadata/BlockletInfoColumnar.java     |  405 -
 .../core/metadata/ValueEncoderMeta.java         |  104 -
 .../reader/CarbonDictionaryColumnMetaChunk.java |  107 -
 .../reader/CarbonDictionaryMetadataReader.java  |   58 -
 .../CarbonDictionaryMetadataReaderImpl.java     |  201 -
 .../core/reader/CarbonDictionaryReader.java     |   70 -
 .../core/reader/CarbonDictionaryReaderImpl.java |  314 -
 .../core/reader/CarbonFooterReader.java         |   78 -
 .../core/reader/CarbonIndexFileReader.java      |   95 -
 .../carbondata/core/reader/ThriftReader.java    |  146 -
 .../CarbonDictionarySortIndexReader.java        |   47 -
 .../CarbonDictionarySortIndexReaderImpl.java    |  229 -
 .../core/service/ColumnUniqueIdService.java     |   34 -
 .../core/service/DictionaryService.java         |   92 -
 .../carbondata/core/service/PathService.java    |   38 -
 .../java/org/carbondata/core/util/ByteUtil.java |  320 -
 .../core/util/CarbonFileFolderComparator.java   |   51 -
 .../core/util/CarbonLoadStatisticsDummy.java    |  104 -
 .../core/util/CarbonLoadStatisticsImpl.java     |  413 -
 .../carbondata/core/util/CarbonMergerUtil.java  |   49 -
 .../core/util/CarbonMetadataUtil.java           |  450 -
 .../carbondata/core/util/CarbonProperties.java  |  494 --
 .../core/util/CarbonTimeStatisticsFactory.java  |   52 -
 .../org/carbondata/core/util/CarbonUtil.java    | 1426 ---
 .../core/util/CarbonUtilException.java          |   80 -
 .../core/util/DataFileFooterConverter.java      |  467 -
 .../org/carbondata/core/util/DataTypeUtil.java  |  410 -
 .../carbondata/core/util/LoadStatistics.java    |   63 -
 .../core/util/ValueCompressionUtil.java         | 1027 ---
 .../carbondata/core/writer/ByteArrayHolder.java |   77 -
 .../core/writer/CarbonDictionaryWriter.java     |   63 -
 .../core/writer/CarbonDictionaryWriterImpl.java |  422 -
 .../core/writer/CarbonFooterWriter.java         |   71 -
 .../core/writer/CarbonIndexFileWriter.java      |   64 -
 .../core/writer/HierarchyValueWriterForCSV.java |  320 -
 .../carbondata/core/writer/ThriftWriter.java    |  119 -
 .../exception/CarbonDataWriterException.java    |   81 -
 .../CarbonDictionarySortIndexWriter.java        |   48 -
 .../CarbonDictionarySortIndexWriterImpl.java    |  215 -
 .../sortindex/CarbonDictionarySortInfo.java     |   65 -
 .../CarbonDictionarySortInfoPreparator.java     |  150 -
 .../sortindex/CarbonDictionarySortModel.java    |  179 -
 .../scan/collector/ScannedResultCollector.java  |   38 -
 .../impl/AbstractScannedResultCollector.java    |  157 -
 .../impl/DictionaryBasedResultCollector.java    |  130 -
 .../collector/impl/RawBasedResultCollector.java |   67 -
 .../scan/complextypes/ArrayQueryType.java       |  158 -
 .../scan/complextypes/ComplexQueryType.java     |   80 -
 .../scan/complextypes/PrimitiveQueryType.java   |  175 -
 .../scan/complextypes/StructQueryType.java      |  184 -
 .../carbondata/scan/executor/QueryExecutor.java |   40 -
 .../scan/executor/QueryExecutorFactory.java     |   33 -
 .../exception/QueryExecutionException.java      |   96 -
 .../executor/impl/AbstractQueryExecutor.java    |  412 -
 .../scan/executor/impl/DetailQueryExecutor.java |   42 -
 .../executor/impl/QueryExecutorProperties.java  |   90 -
 .../scan/executor/infos/AggregatorInfo.java     |  149 -
 .../scan/executor/infos/BlockExecutionInfo.java |  681 --
 .../scan/executor/infos/KeyStructureInfo.java   |  119 -
 .../scan/executor/infos/SortInfo.java           |  125 -
 .../scan/executor/util/QueryUtil.java           |  951 --
 .../scan/executor/util/RestructureUtil.java     |  135 -
 .../scan/expression/BinaryExpression.java       |   59 -
 .../scan/expression/ColumnExpression.java       |  114 -
 .../carbondata/scan/expression/Expression.java  |   50 -
 .../scan/expression/ExpressionResult.java       |  472 -
 .../scan/expression/LeafExpression.java         |   24 -
 .../scan/expression/LiteralExpression.java      |   69 -
 .../scan/expression/UnaryExpression.java        |   33 -
 .../scan/expression/UnknownExpression.java      |   28 -
 .../expression/arithmetic/AddExpression.java    |   86 -
 .../arithmetic/BinaryArithmeticExpression.java  |   34 -
 .../expression/arithmetic/DivideExpression.java |   86 -
 .../arithmetic/MultiplyExpression.java          |   87 -
 .../arithmetic/SubstractExpression.java         |   88 -
 .../BinaryConditionalExpression.java            |   37 -
 .../conditional/ConditionalExpression.java      |   43 -
 .../conditional/EqualToExpression.java          |  108 -
 .../GreaterThanEqualToExpression.java           |   90 -
 .../conditional/GreaterThanExpression.java      |   93 -
 .../expression/conditional/InExpression.java    |   98 -
 .../conditional/LessThanEqualToExpression.java  |   92 -
 .../conditional/LessThanExpression.java         |   95 -
 .../expression/conditional/ListExpression.java  |   62 -
 .../conditional/NotEqualsExpression.java        |  104 -
 .../expression/conditional/NotInExpression.java |   97 -
 .../exception/FilterIllegalMemberException.java |   98 -
 .../exception/FilterUnsupportedException.java   |   92 -
 .../scan/expression/logical/AndExpression.java  |   63 -
 .../logical/BinaryLogicalExpression.java        |  127 -
 .../scan/expression/logical/NotExpression.java  |   60 -
 .../scan/expression/logical/OrExpression.java   |   62 -
 .../scan/filter/DimColumnFilterInfo.java        |   61 -
 .../scan/filter/FilterExpressionProcessor.java  |  352 -
 .../carbondata/scan/filter/FilterProcessor.java |   60 -
 .../org/carbondata/scan/filter/FilterUtil.java  | 1395 ---
 .../scan/filter/GenericQueryType.java           |   73 -
 .../filter/executer/AndFilterExecuterImpl.java  |   62 -
 .../executer/DimColumnExecuterFilterInfo.java   |   32 -
 .../ExcludeColGroupFilterExecuterImpl.java      |  137 -
 .../executer/ExcludeFilterExecuterImpl.java     |  188 -
 .../scan/filter/executer/FilterExecuter.java    |   45 -
 .../IncludeColGroupFilterExecuterImpl.java      |  209 -
 .../executer/IncludeFilterExecuterImpl.java     |  206 -
 .../filter/executer/OrFilterExecuterImpl.java   |   52 -
 .../executer/RestructureFilterExecuterImpl.java |   55 -
 .../executer/RowLevelFilterExecuterImpl.java    |  402 -
 .../RowLevelRangeGrtThanFiterExecuterImpl.java  |  209 -
 ...elRangeGrtrThanEquaToFilterExecuterImpl.java |  200 -
 ...velRangeLessThanEqualFilterExecuterImpl.java |  248 -
 .../RowLevelRangeLessThanFiterExecuterImpl.java |  252 -
 .../RowLevelRangeTypeExecuterFacory.java        |   93 -
 .../scan/filter/intf/ExpressionType.java        |   44 -
 .../scan/filter/intf/FilterExecuterType.java    |   28 -
 .../carbondata/scan/filter/intf/RowImpl.java    |   44 -
 .../carbondata/scan/filter/intf/RowIntf.java    |   31 -
 .../filter/resolver/AndFilterResolverImpl.java  |   56 -
 .../resolver/ConditionalFilterResolverImpl.java |  241 -
 .../filter/resolver/FilterResolverIntf.java     |  102 -
 .../resolver/LogicalFilterResolverImpl.java     |  118 -
 .../resolver/RestructureFilterResolverImpl.java |  206 -
 .../resolver/RowLevelFilterResolverImpl.java    |  141 -
 .../RowLevelRangeFilterResolverImpl.java        |  287 -
 .../metadata/FilterResolverMetadata.java        |   62 -
 .../DimColumnResolvedFilterInfo.java            |  194 -
 .../MeasureColumnResolvedFilterInfo.java        |  105 -
 .../visitable/ResolvedFilterInfoVisitable.java  |   38 -
 .../visitor/CustomTypeDictionaryVisitor.java    |  110 -
 .../visitor/DictionaryColumnVisitor.java        |   74 -
 .../visitor/FilterInfoTypeVisitorFactory.java   |   45 -
 .../visitor/NoDictionaryTypeVisitor.java        |   69 -
 .../visitor/ResolvedFilterInfoVisitorIntf.java  |   40 -
 .../carbondata/scan/model/CarbonQueryPlan.java  |  239 -
 .../org/carbondata/scan/model/QueryColumn.java  |  109 -
 .../carbondata/scan/model/QueryDimension.java   |   58 -
 .../org/carbondata/scan/model/QueryMeasure.java |   61 -
 .../org/carbondata/scan/model/QueryModel.java   |  507 --
 .../carbondata/scan/model/QuerySchemaInfo.java  |   86 -
 .../carbondata/scan/model/SortOrderType.java    |   57 -
 .../processor/AbstractDataBlockIterator.java    |  140 -
 .../scan/processor/BlockletIterator.java        |   88 -
 .../scan/processor/BlocksChunkHolder.java       |  125 -
 .../processor/impl/DataBlockIteratorImpl.java   |   63 -
 .../scan/result/AbstractScannedResult.java      |  437 -
 .../org/carbondata/scan/result/BatchResult.java |  105 -
 .../java/org/carbondata/scan/result/Result.java |   70 -
 .../result/impl/FilterQueryScannedResult.java   |  147 -
 .../impl/NonFilterQueryScannedResult.java       |  146 -
 .../AbstractDetailQueryResultIterator.java      |  130 -
 .../scan/result/iterator/ChunkRowIterator.java  |   79 -
 .../iterator/DetailQueryResultIterator.java     |   88 -
 .../scan/result/iterator/RawResultIterator.java |  169 -
 .../scan/scanner/AbstractBlockletScanner.java   |   62 -
 .../scan/scanner/BlockletScanner.java           |   41 -
 .../scan/scanner/impl/FilterScanner.java        |  174 -
 .../scan/scanner/impl/NonFilterScanner.java     |   37 -
 .../scan/wrappers/ByteArrayWrapper.java         |  202 -
 .../core/cache/CacheProviderTest.java           |   67 +
 .../dictionary/AbstractDictionaryCacheTest.java |  177 +
 .../dictionary/DictionaryChunksWrapperTest.java |  110 +
 .../dictionary/ForwardDictionaryCacheTest.java  |  278 +
 .../dictionary/ReverseDictionaryCacheTest.java  |  274 +
 .../carbon/datastore/BlockIndexStoreTest.java   |  204 +
 .../datastore/block/SegmentPropertiesTest.java  |  352 +
 .../block/SegmentPropertiesTestUtil.java        |  234 +
 .../impl/btree/BTreeBlockFinderTest.java        |  367 +
 .../carbon/metadata/CarbonMetadataTest.java     |  148 +
 .../metadata/schema/table/CarbonTableTest.java  |  119 +
 .../table/CarbonTableWithComplexTypesTest.java  |  159 +
 .../metadata/schema/table/TableInfoTest.java    |   52 +
 .../metadata/schema/table/TableSchemaTest.java  |   51 +
 .../CarbonFormatDirectoryStructureTest.java     |   73 +
 .../path/CarbonFormatSharedDictionaryTest.java  |   48 +
 ...CarbonDictionarySortIndexReaderImplTest.java |  135 +
 .../carbondata/core/util/ByteUtilTest.java      |  167 +
 .../carbondata/core/util/CarbonUtilTest.java    |   35 +
 .../writer/CarbonDictionaryWriterImplTest.java  |  528 ++
 .../core/writer/CarbonFooterWriterTest.java     |  213 +
 ...CarbonDictionarySortIndexWriterImplTest.java |  158 +
 .../carbondata/scan/QueryExecutor_UT.java       |   35 +
 .../scan/executor/util/QueryUtilTest.java       |  133 +
 .../core/cache/CacheProviderTest.java           |   67 -
 .../dictionary/AbstractDictionaryCacheTest.java |  177 -
 .../dictionary/DictionaryChunksWrapperTest.java |  110 -
 .../dictionary/ForwardDictionaryCacheTest.java  |  278 -
 .../dictionary/ReverseDictionaryCacheTest.java  |  274 -
 .../carbon/datastore/BlockIndexStoreTest.java   |  204 -
 .../datastore/block/SegmentPropertiesTest.java  |  352 -
 .../block/SegmentPropertiesTestUtil.java        |  234 -
 .../impl/btree/BTreeBlockFinderTest.java        |  367 -
 .../carbon/metadata/CarbonMetadataTest.java     |  148 -
 .../metadata/schema/table/CarbonTableTest.java  |  119 -
 .../table/CarbonTableWithComplexTypesTest.java  |  159 -
 .../metadata/schema/table/TableInfoTest.java    |   52 -
 .../metadata/schema/table/TableSchemaTest.java  |   51 -
 .../CarbonFormatDirectoryStructureTest.java     |   73 -
 .../path/CarbonFormatSharedDictionaryTest.java  |   48 -
 ...CarbonDictionarySortIndexReaderImplTest.java |  135 -
 .../org/carbondata/core/util/ByteUtilTest.java  |  167 -
 .../carbondata/core/util/CarbonUtilTest.java    |   35 -
 .../writer/CarbonDictionaryWriterImplTest.java  |  530 --
 .../core/writer/CarbonFooterWriterTest.java     |  213 -
 ...CarbonDictionarySortIndexWriterImplTest.java |  158 -
 .../org/carbondata/scan/QueryExecutor_UT.java   |   35 -
 .../scan/executor/util/QueryUtilTest.java       |  133 -
 dev/findbugs-exclude.xml                        |   10 +-
 dev/java-code-format-template.xml               |    4 +-
 dev/javastyle-config.xml                        |    2 +-
 dev/scalastyle-config.xml                       |    4 +-
 docs/Carbon-Packaging-and-Interfaces.md         |    2 +-
 .../examples/AllDictionaryExample.scala         |   62 +
 .../carbondata/examples/CarbonExample.scala     |   55 +
 .../examples/ComplexTypeExample.scala           |   78 +
 .../examples/DataFrameAPIExample.scala          |   65 +
 .../examples/GenerateDictionaryExample.scala    |   93 +
 .../apache/carbondata/examples/PerfTest.scala   |  331 +
 .../examples/util/AllDictionaryUtil.scala       |  108 +
 .../examples/util/InitForExamples.scala         |   56 +
 .../examples/AllDictionaryExample.scala         |   62 -
 .../org/carbondata/examples/CarbonExample.scala |   55 -
 .../examples/ComplexTypeExample.scala           |   84 -
 .../examples/DataFrameAPIExample.scala          |   65 -
 .../examples/GenerateDictionaryExample.scala    |   93 -
 .../org/carbondata/examples/PerfTest.scala      |  331 -
 .../examples/util/AllDictionaryUtil.scala       |  108 -
 .../examples/util/InitForExamples.scala         |   56 -
 format/src/main/thrift/carbondata.thrift        |    2 +-
 format/src/main/thrift/carbondataindex.thrift   |    2 +-
 format/src/main/thrift/dictionary.thrift        |    2 +-
 format/src/main/thrift/dictionary_meta.thrift   |    2 +-
 format/src/main/thrift/schema.thrift            |    2 +-
 format/src/main/thrift/sort_index.thrift        |    2 +-
 format/src/main/thrift/table_status.thrift      |    2 +-
 .../carbondata/hadoop/CarbonInputFormat.java    |  742 ++
 .../carbondata/hadoop/CarbonInputSplit.java     |   69 +
 .../carbondata/hadoop/CarbonPathFilter.java     |   44 +
 .../carbondata/hadoop/CarbonProjection.java     |   42 +
 .../hadoop/CarbonRawDataInputSplit.java         |   63 +
 .../carbondata/hadoop/CarbonRecordReader.java   |  106 +
 .../hadoop/readsupport/CarbonReadSupport.java   |   46 +
 .../AbstractDictionaryDecodedReadSupport.java   |   87 +
 .../impl/ArrayWritableReadSupport.java          |   50 +
 .../impl/DictionaryDecodedReadSupportImpl.java  |   35 +
 .../readsupport/impl/RawDataReadSupport.java    |   49 +
 .../hadoop/util/CarbonInputFormatUtil.java      |  131 +
 .../hadoop/util/ObjectSerializationUtil.java    |  117 +
 .../carbondata/hadoop/util/SchemaReader.java    |   69 +
 .../carbondata/hadoop/CarbonInputFormat.java    |  742 --
 .../org/carbondata/hadoop/CarbonInputSplit.java |   69 -
 .../org/carbondata/hadoop/CarbonPathFilter.java |   44 -
 .../org/carbondata/hadoop/CarbonProjection.java |   42 -
 .../hadoop/CarbonRawDataInputSplit.java         |   63 -
 .../carbondata/hadoop/CarbonRecordReader.java   |  106 -
 .../hadoop/readsupport/CarbonReadSupport.java   |   46 -
 .../AbstractDictionaryDecodedReadSupport.java   |   87 -
 .../impl/ArrayWritableReadSupport.java          |   50 -
 .../impl/DictionaryDecodedReadSupportImpl.java  |   35 -
 .../readsupport/impl/RawDataReadSupport.java    |   49 -
 .../hadoop/util/CarbonInputFormatUtil.java      |  131 -
 .../hadoop/util/ObjectSerializationUtil.java    |  117 -
 .../carbondata/hadoop/util/SchemaReader.java    |   69 -
 .../hadoop/ft/CarbonInputFormat_FT.java         |   81 +
 .../hadoop/ft/CarbonInputMapperTest.java        |  189 +
 .../test/util/ObjectSerializationUtilTest.java  |   56 +
 .../hadoop/test/util/StoreCreator.java          |  564 ++
 .../hadoop/ft/CarbonInputFormat_FT.java         |   81 -
 .../hadoop/ft/CarbonInputMapperTest.java        |  189 -
 .../test/util/ObjectSerializationUtilTest.java  |   56 -
 .../hadoop/test/util/StoreCreator.java          |  564 --
 .../sql/common/util/CarbonHiveContext.scala     |    4 +-
 .../allqueries/AllDataTypesTestCase1.scala      | 4494 ++++++++++
 .../allqueries/AllDataTypesTestCase2.scala      | 8186 ++++++++++++++++++
 .../allqueries/AllDataTypesTestCase3.scala      | 1472 ++++
 .../allqueries/AllDataTypesTestCase4.scala      | 1956 +++++
 .../allqueries/AllDataTypesTestCase5.scala      | 3268 +++++++
 .../allqueries/AllDataTypesTestCase6.scala      | 2579 ++++++
 .../allqueries/AllDataTypesTestCase1.scala      | 4494 ----------
 .../allqueries/AllDataTypesTestCase2.scala      | 8186 ------------------
 .../allqueries/AllDataTypesTestCase3.scala      | 1472 ----
 .../allqueries/AllDataTypesTestCase4.scala      | 1956 -----
 .../allqueries/AllDataTypesTestCase5.scala      | 3268 -------
 .../allqueries/AllDataTypesTestCase6.scala      | 2579 ------
 .../spark/merger/CarbonCompactionExecutor.java  |  221 +
 .../spark/merger/CarbonCompactionUtil.java      |  132 +
 .../spark/merger/CompactionCallable.java        |   44 +
 .../spark/merger/CompactionType.java            |   28 +
 .../spark/merger/RowResultMerger.java           |  326 +
 .../spark/merger/TupleConversionAdapter.java    |   85 +
 .../MalformedCarbonCommandException.java        |   83 +
 .../carbondata/spark/load/CarbonLoadModel.java  |  527 ++
 .../carbondata/spark/load/CarbonLoaderUtil.java | 1409 +++
 .../spark/load/DeleteLoadFolders.java           |  361 +
 .../spark/load/DeleteLoadFromMetadata.java      |   44 +
 .../spark/load/DeletedLoadMetadata.java         |   53 +
 .../spark/merger/CarbonDataMergerUtil.java      |  729 ++
 .../spark/merger/NodeBlockRelation.java         |   60 +
 .../spark/merger/NodeMultiBlockRelation.java    |   59 +
 .../spark/partition/api/DataPartitioner.java    |   63 +
 .../spark/partition/api/Partition.java          |   51 +
 .../partition/api/impl/CSVFilePartitioner.java  |  365 +
 .../api/impl/DataPartitionerProperties.java     |   90 +
 .../partition/api/impl/DefaultLoadBalancer.java |   78 +
 .../spark/partition/api/impl/PartitionImpl.java |   63 +
 .../api/impl/PartitionMultiFileImpl.java        |   51 +
 .../api/impl/QueryPartitionHelper.java          |  189 +
 .../api/impl/SampleDataPartitionerImpl.java     |  151 +
 .../spark/partition/reader/CSVIterator.java     |   74 +
 .../spark/partition/reader/CSVParser.java       |  559 ++
 .../spark/partition/reader/CSVReader.java       |  496 ++
 .../spark/partition/reader/CSVWriter.java       |  396 +
 .../spark/partition/reader/LineReader.java      |   68 +
 .../spark/partition/reader/ResultSetHelper.java |   87 +
 .../reader/ResultSetHelperService.java          |  327 +
 .../readsupport/SparkRowReadSupportImpl.java    |   66 +
 .../carbondata/spark/splits/TableSplit.java     |  129 +
 .../carbondata/spark/util/CarbonQueryUtil.java  |  255 +
 .../carbondata/spark/util/LoadMetadataUtil.java |  113 +
 .../spark/merger/CarbonCompactionExecutor.java  |  221 -
 .../spark/merger/CarbonCompactionUtil.java      |  132 -
 .../spark/merger/CompactionCallable.java        |   44 -
 .../spark/merger/CompactionType.java            |   28 -
 .../spark/merger/RowResultMerger.java           |  326 -
 .../spark/merger/TupleConversionAdapter.java    |   85 -
 .../MalformedCarbonCommandException.java        |   83 -
 .../carbondata/spark/load/CarbonLoadModel.java  |  527 --
 .../carbondata/spark/load/CarbonLoaderUtil.java | 1395 ---
 .../spark/load/DeleteLoadFolders.java           |  361 -
 .../spark/load/DeleteLoadFromMetadata.java      |   44 -
 .../spark/load/DeletedLoadMetadata.java         |   53 -
 .../spark/merger/CarbonDataMergerUtil.java      |  729 --
 .../spark/merger/NodeBlockRelation.java         |   60 -
 .../spark/merger/NodeMultiBlockRelation.java    |   59 -
 .../spark/partition/api/DataPartitioner.java    |   63 -
 .../spark/partition/api/Partition.java          |   51 -
 .../partition/api/impl/CSVFilePartitioner.java  |  365 -
 .../api/impl/DataPartitionerProperties.java     |   90 -
 .../partition/api/impl/DefaultLoadBalancer.java |   78 -
 .../spark/partition/api/impl/PartitionImpl.java |   63 -
 .../api/impl/PartitionMultiFileImpl.java        |   51 -
 .../api/impl/QueryPartitionHelper.java          |  189 -
 .../api/impl/SampleDataPartitionerImpl.java     |  151 -
 .../spark/partition/reader/CSVIterator.java     |   74 -
 .../spark/partition/reader/CSVParser.java       |  559 --
 .../spark/partition/reader/CSVReader.java       |  496 --
 .../spark/partition/reader/CSVWriter.java       |  396 -
 .../spark/partition/reader/LineReader.java      |   68 -
 .../spark/partition/reader/ResultSetHelper.java |   87 -
 .../reader/ResultSetHelperService.java          |  327 -
 .../readsupport/SparkRowReadSupportImpl.java    |   66 -
 .../org/carbondata/spark/splits/TableSplit.java |  128 -
 .../carbondata/spark/util/CarbonQueryUtil.java  |  255 -
 .../carbondata/spark/util/LoadMetadataUtil.java |  113 -
 .../spark/CarbonColumnValidator.scala           |   36 +
 .../apache/carbondata/spark/CarbonFilters.scala |  364 +
 .../apache/carbondata/spark/CarbonOption.scala  |   39 +
 .../carbondata/spark/CarbonSparkFactory.scala   |   60 +
 .../spark/DictionaryDetailHelper.scala          |   62 +
 .../org/apache/carbondata/spark/KeyVal.scala    |   89 +
 .../spark/csv/CarbonCsvRelation.scala           |  242 +
 .../carbondata/spark/csv/CarbonTextFile.scala   |   58 +
 .../carbondata/spark/csv/DefaultSource.scala    |  175 +
 .../org/apache/carbondata/spark/package.scala   |  124 +
 .../spark/rdd/CarbonCleanFilesRDD.scala         |   83 +
 .../spark/rdd/CarbonDataFrameRDD.scala          |   36 +
 .../spark/rdd/CarbonDataLoadRDD.scala           |  489 ++
 .../spark/rdd/CarbonDataPartitionRDD.scala      |  112 +
 .../spark/rdd/CarbonDataRDDFactory.scala        |  921 ++
 .../spark/rdd/CarbonDeleteLoadByDateRDD.scala   |   94 +
 .../spark/rdd/CarbonDeleteLoadRDD.scala         |   84 +
 .../spark/rdd/CarbonDropTableRDD.scala          |   76 +
 .../spark/rdd/CarbonGlobalDictionaryRDD.scala   |  547 ++
 .../carbondata/spark/rdd/CarbonMergerRDD.scala  |  362 +
 .../carbondata/spark/rdd/CarbonScanRDD.scala    |  290 +
 .../apache/carbondata/spark/rdd/Compactor.scala |  154 +
 .../spark/tasks/DictionaryWriterTask.scala      |  110 +
 .../spark/tasks/SortIndexWriterTask.scala       |   59 +
 .../spark/thriftserver/CarbonThriftServer.scala |   58 +
 .../carbondata/spark/util/CarbonScalaUtil.scala |  166 +
 .../carbondata/spark/util/CommonUtil.scala      |  202 +
 .../spark/util/GlobalDictionaryUtil.scala       |  778 ++
 .../carbondata/spark/util/QueryPlanUtil.scala   |   47 +
 .../apache/spark/sql/CarbonBoundReference.scala |    2 +-
 .../org/apache/spark/sql/CarbonContext.scala    |   11 +-
 .../sql/CarbonDatasourceHadoopRelation.scala    |   12 +-
 .../spark/sql/CarbonDatasourceRelation.scala    |   14 +-
 .../spark/sql/CarbonDictionaryDecoder.scala     |   14 +-
 .../org/apache/spark/sql/CarbonOperators.scala  |   11 +-
 .../org/apache/spark/sql/CarbonSqlParser.scala  |   13 +-
 .../spark/sql/SparkUnknownExpression.scala      |   12 +-
 .../execution/command/carbonTableSchema.scala   |   69 +-
 .../spark/sql/hive/CarbonMetastoreCatalog.scala |   51 +-
 .../spark/sql/hive/CarbonSQLDialect.scala       |    2 +-
 .../spark/sql/hive/CarbonStrategies.scala       |    9 +-
 .../spark/sql/hive/DistributionUtil.scala       |    6 +-
 .../spark/sql/optimizer/CarbonOptimizer.scala   |    2 +-
 .../scala/org/apache/spark/util/FileUtils.scala |    8 +-
 .../org/apache/spark/util/SplitUtils.scala      |    2 +-
 .../spark/CarbonColumnValidator.scala           |   36 -
 .../org/carbondata/spark/CarbonFilters.scala    |  364 -
 .../org/carbondata/spark/CarbonOption.scala     |   39 -
 .../carbondata/spark/CarbonSparkFactory.scala   |   63 -
 .../spark/DictionaryDetailHelper.scala          |   66 -
 .../scala/org/carbondata/spark/KeyVal.scala     |   89 -
 .../spark/csv/CarbonCsvRelation.scala           |  242 -
 .../carbondata/spark/csv/CarbonTextFile.scala   |   63 -
 .../carbondata/spark/csv/DefaultSource.scala    |  175 -
 .../scala/org/carbondata/spark/package.scala    |  124 -
 .../spark/rdd/CarbonCleanFilesRDD.scala         |   83 -
 .../spark/rdd/CarbonDataFrameRDD.scala          |   36 -
 .../spark/rdd/CarbonDataLoadRDD.scala           |  495 --
 .../spark/rdd/CarbonDataPartitionRDD.scala      |  113 -
 .../spark/rdd/CarbonDataRDDFactory.scala        |  921 --
 .../spark/rdd/CarbonDeleteLoadByDateRDD.scala   |   95 -
 .../spark/rdd/CarbonDeleteLoadRDD.scala         |   84 -
 .../spark/rdd/CarbonDropTableRDD.scala          |   76 -
 .../spark/rdd/CarbonGlobalDictionaryRDD.scala   |  546 --
 .../carbondata/spark/rdd/CarbonMergerRDD.scala  |  362 -
 .../carbondata/spark/rdd/CarbonScanRDD.scala    |  290 -
 .../org/carbondata/spark/rdd/Compactor.scala    |  154 -
 .../spark/tasks/DictionaryWriterTask.scala      |  110 -
 .../spark/tasks/SortIndexWriterTask.scala       |   62 -
 .../spark/thriftserver/CarbonThriftServer.scala |   58 -
 .../carbondata/spark/util/CarbonScalaUtil.scala |  166 -
 .../org/carbondata/spark/util/CommonUtil.scala  |  202 -
 .../spark/util/GlobalDictionaryUtil.scala       |  780 --
 .../carbondata/spark/util/QueryPlanUtil.scala   |   47 -
 .../spark/load/CarbonLoaderUtilTest.java        |    6 +-
 .../validation/FileFooterValidator.java         |   30 +-
 .../aggquery/IntegerDataTypeTestCase.scala      |   49 +
 .../spark/testsuite/bigdecimal/TestBigInt.scala |   93 +
 .../TestDimensionWithDecimalDataType.scala      |   61 +
 ...plexPrimitiveTimestampDirectDictionary.scala |   72 +
 .../complexType/TestComplexTypeQuery.scala      |  165 +
 .../complexType/TestCreateTableWithDouble.scala |   95 +
 .../MultiFilesDataLoagdingTestCase.scala        |   59 +
 .../dataload/TestLoadDataWithBlankLine.scala    |   49 +
 .../TestLoadDataWithEmptyArrayColumns.scala     |   66 +
 .../dataload/TestLoadDataWithJunkChars.scala    |   61 +
 .../TestLoadDataWithMaxMinInteger.scala         |   98 +
 .../dataload/TestLoadDataWithNullMeasures.scala |   53 +
 .../testsuite/emptyrow/TestEmptyRows.scala      |   82 +
 .../aggquery/AggregateQueryTestCase.scala       |   45 +
 .../AllDataTypesTestCaseAggregate.scala         |  111 +
 .../aggquery/AverageQueryTestCase.scala         |  112 +
 .../AllDataTypesTestCaseAggregate.scala         | 1162 +++
 .../allqueries/TestQueryWithoutDataLoad.scala   |   63 +
 .../testsuite/bigdecimal/TestAvgForBigInt.scala |   59 +
 .../testsuite/bigdecimal/TestBigDecimal.scala   |  204 +
 .../bigdecimal/TestNullAndEmptyFields.scala     |  110 +
 .../blockprune/BlockPruneQueryTestCase.scala    |  115 +
 .../createtable/TestCreateTableSyntax.scala     |  132 +
 .../DataCompactionCardinalityBoundryTest.scala  |  134 +
 .../datacompaction/DataCompactionLockTest.scala |  129 +
 .../DataCompactionNoDictionaryTest.scala        |  179 +
 .../datacompaction/DataCompactionTest.scala     |  176 +
 .../MajorCompactionIgnoreInMinorTest.scala      |  148 +
 .../MajorCompactionStopsAfterCompaction.scala   |  143 +
 .../dataload/DefaultSourceTestCase.scala        |  105 +
 .../TestDataWithDicExcludeAndInclude.scala      |  106 +
 .../dataload/TestLoadDataWithHiveSyntax.scala   |  601 ++
 ...ataWithMalformedCarbonCommandException.scala |  163 +
 .../dataload/TestLoadDataWithNoMeasure.scala    |  128 +
 .../TestLoadDataWithNotProperInputFile.scala    |   76 +
 .../dataload/TestLoadTblNameIsKeyword.scala     |   96 +
 .../TestNoInvertedIndexLoadAndQuery.scala       |  105 +
 .../dataretention/DataRetentionTestCase.scala   |  257 +
 .../deleteTable/TestDeleteTableNewDDL.scala     |  112 +
 .../detailquery/AllDataTypesTestCase.scala      |   54 +
 .../ColumnGroupDataTypesTestCase.scala          |  149 +
 .../ColumnPropertyValidationTestCase.scala      |   46 +
 .../HighCardinalityDataTypesTestCase.scala      |  250 +
 .../detailquery/IntegerDataTypeTestCase.scala   |   48 +
 .../NoDictionaryColumnTestCase.scala            |   60 +
 ...estampDataTypeDirectDictionaryTestCase.scala |  157 +
 ...TypeDirectDictionaryWithNoDictTestCase.scala |  107 +
 .../TimestampDataTypeNullDataTest.scala         |   92 +
 .../filterexpr/AllDataTypesTestCaseFilter.scala |   66 +
 .../filterexpr/CountStarTestCase.scala          |   73 +
 .../filterexpr/FilterProcessorTestCase.scala    |  298 +
 .../GrtLtFilterProcessorTestCase.scala          |  176 +
 .../filterexpr/IntegerDataTypeTestCase.scala    |   48 +
 .../NullMeasureValueTestCaseFilter.scala        |   58 +
 .../HadoopFSRelationTestCase.scala              |   69 +
 .../joinquery/AllDataTypesTestCaseJoin.scala    |   54 +
 .../joinquery/IntegerDataTypeTestCase.scala     |   48 +
 .../NullMeasureValueTestCaseAggregate.scala     |   85 +
 .../TestNullValueSerialization.scala            |  112 +
 .../sortexpr/AllDataTypesTestCaseSort.scala     |   54 +
 .../sortexpr/IntegerDataTypeTestCase.scala      |   49 +
 .../windowsexpr/WindowsExprTestCase.scala       |   78 +
 .../spark/util/AllDictionaryTestCase.scala      |  140 +
 .../AutoHighCardinalityIdentifyTestCase.scala   |  178 +
 .../spark/util/DictionaryTestCaseUtil.scala     |   51 +
 .../util/ExternalColumnDictionaryTestCase.scala |  213 +
 ...GlobalDictionaryUtilConcurrentTestCase.scala |  179 +
 .../util/GlobalDictionaryUtilTestCase.scala     |  214 +
 .../apache/spark/sql/TestCarbonSqlParser.scala  |    2 +-
 .../sql/common/util/CarbonHiveContext.scala     |    4 +-
 ...plexPrimitiveTimestampDirectDictionary.scala |   72 -
 .../complexType/TestComplexTypeQuery.scala      |  165 -
 .../complexType/TestCreateTableWithDouble.scala |   95 -
 .../dataload/TestLoadDataWithBlankLine.scala    |   49 -
 .../TestLoadDataWithEmptyArrayColumns.scala     |   66 -
 .../dataload/TestLoadDataWithJunkChars.scala    |   61 -
 .../TestLoadDataWithMaxMinInteger.scala         |   98 -
 .../dataload/TestLoadDataWithNullMeasures.scala |   53 -
 .../aggquery/AggregateQueryTestCase.scala       |   45 -
 .../AllDataTypesTestCaseAggregate.scala         |  111 -
 .../aggquery/AverageQueryTestCase.scala         |  110 -
 .../aggquery/IntegerDataTypeTestCase.scala      |   49 -
 .../AllDataTypesTestCaseAggregate.scala         | 1162 ---
 .../allqueries/TestQueryWithoutDataLoad.scala   |   63 -
 .../testsuite/bigdecimal/TestAvgForBigInt.scala |   59 -
 .../testsuite/bigdecimal/TestBigDecimal.scala   |  204 -
 .../spark/testsuite/bigdecimal/TestBigInt.scala |   93 -
 .../TestDimensionWithDecimalDataType.scala      |   61 -
 .../bigdecimal/TestNullAndEmptyFields.scala     |  110 -
 .../blockprune/BlockPruneQueryTestCase.scala    |  115 -
 .../createtable/TestCreateTableSyntax.scala     |  132 -
 .../DataCompactionCardinalityBoundryTest.scala  |  134 -
 .../datacompaction/DataCompactionLockTest.scala |  129 -
 .../DataCompactionNoDictionaryTest.scala        |  179 -
 .../datacompaction/DataCompactionTest.scala     |  176 -
 .../MajorCompactionIgnoreInMinorTest.scala      |  148 -
 .../MajorCompactionStopsAfterCompaction.scala   |  143 -
 .../dataload/DefaultSourceTestCase.scala        |  105 -
 .../MultiFilesDataLoagdingTestCase.scala        |   59 -
 .../TestDataWithDicExcludeAndInclude.scala      |  107 -
 .../dataload/TestLoadDataWithHiveSyntax.scala   |  600 --
 ...ataWithMalformedCarbonCommandException.scala |  163 -
 .../dataload/TestLoadDataWithNoMeasure.scala    |  128 -
 .../TestLoadDataWithNotProperInputFile.scala    |   78 -
 .../dataload/TestLoadTblNameIsKeyword.scala     |   96 -
 .../TestNoInvertedIndexLoadAndQuery.scala       |  105 -
 .../dataretention/DataRetentionTestCase.scala   |  257 -
 .../deleteTable/TestDeleteTableNewDDL.scala     |  112 -
 .../detailquery/AllDataTypesTestCase.scala      |   54 -
 .../ColumnGroupDataTypesTestCase.scala          |  149 -
 .../ColumnPropertyValidationTestCase.scala      |   46 -
 .../HighCardinalityDataTypesTestCase.scala      |  250 -
 .../detailquery/IntegerDataTypeTestCase.scala   |   48 -
 .../NoDictionaryColumnTestCase.scala            |   60 -
 ...estampDataTypeDirectDictionaryTestCase.scala |  157 -
 ...TypeDirectDictionaryWithNoDictTestCase.scala |  107 -
 .../TimestampDataTypeNullDataTest.scala         |   92 -
 .../testsuite/emptyrow/TestEmptyRows.scala      |   82 -
 .../filterexpr/AllDataTypesTestCaseFilter.scala |   66 -
 .../filterexpr/CountStarTestCase.scala          |   72 -
 .../filterexpr/FilterProcessorTestCase.scala    |  294 -
 .../GrtLtFilterProcessorTestCase.scala          |  176 -
 .../filterexpr/IntegerDataTypeTestCase.scala    |   48 -
 .../NullMeasureValueTestCaseFilter.scala        |   58 -
 .../HadoopFSRelationTestCase.scala              |   69 -
 .../joinquery/AllDataTypesTestCaseJoin.scala    |   54 -
 .../joinquery/IntegerDataTypeTestCase.scala     |   48 -
 .../NullMeasureValueTestCaseAggregate.scala     |   84 -
 .../TestNullValueSerialization.scala            |  112 -
 .../sortexpr/AllDataTypesTestCaseSort.scala     |   54 -
 .../sortexpr/IntegerDataTypeTestCase.scala      |   49 -
 .../windowsexpr/WindowsExprTestCase.scala       |   78 -
 .../spark/util/AllDictionaryTestCase.scala      |  140 -
 .../AutoHighCardinalityIdentifyTestCase.scala   |  186 -
 .../spark/util/DictionaryTestCaseUtil.scala     |   52 -
 .../util/ExternalColumnDictionaryTestCase.scala |  213 -
 ...GlobalDictionaryUtilConcurrentTestCase.scala |  177 -
 .../util/GlobalDictionaryUtilTestCase.scala     |  214 -
 .../plugin.xml                                  |    2 +-
 .../carbonautoagggraphgenerator/plugin.xml      |    2 +-
 .../steps/carbonautoaggslicemerger/plugin.xml   |    2 +-
 .../steps/carboncsvbasedseqgen/plugin.xml       |    2 +-
 .../plugins/steps/carboncsvreader/plugin.xml    |    2 +-
 .../steps/carboncsvreaderstrep/plugin.xml       |    2 +-
 .../plugins/steps/carbondatawriter/plugin.xml   |    2 +-
 .../plugins/steps/carbonfactreader/plugin.xml   |    2 +-
 .../plugins/steps/carbongroupby/plugin.xml      |    2 +-
 .../steps/carboninmemoryfactreader/plugin.xml   |    2 +-
 .../plugins/steps/carbonseqgen/plugin.xml       |    2 +-
 .../plugins/steps/carbonslicemerger/plugin.xml  |    2 +-
 .../steps/carbonsortkeyandgroupby/plugin.xml    |    2 +-
 .../plugins/steps/mdkeygenstep/plugin.xml       |    2 +-
 .../plugins/steps/sortkeystep/plugin.xml        |    2 +-
 processing/pom.xml                              |   10 +-
 .../fileoperations/AtomicFileOperations.java    |   33 +
 .../AtomicFileOperationsImpl.java               |   87 +
 .../lcm/fileoperations/FileWriteOperation.java  |   25 +
 .../lcm/locks/AbstractCarbonLock.java           |   77 +
 .../carbondata/lcm/locks/CarbonLockFactory.java |   94 +
 .../carbondata/lcm/locks/HdfsFileLock.java      |  106 +
 .../carbondata/lcm/locks/ICarbonLock.java       |   40 +
 .../carbondata/lcm/locks/LocalFileLock.java     |  159 +
 .../apache/carbondata/lcm/locks/LockUsage.java  |   31 +
 .../carbondata/lcm/locks/ZooKeeperLocking.java  |  195 +
 .../carbondata/lcm/locks/ZookeeperInit.java     |   82 +
 .../lcm/status/SegmentStatusManager.java        |  507 ++
 .../api/dataloader/DataLoadModel.java           |  201 +
 .../processing/api/dataloader/SchemaInfo.java   |  191 +
 .../constants/DataProcessorConstants.java       |   59 +
 .../processing/csvload/DataGraphExecuter.java   |  635 ++
 .../processing/csvload/GraphExecutionUtil.java  |  362 +
 .../processing/csvreaderstep/CsvInput.java      |  528 ++
 .../processing/csvreaderstep/CsvInputData.java  |   49 +
 .../processing/csvreaderstep/CsvInputMeta.java  |  937 ++
 .../csvreaderstep/CustomDataStream.java         |  126 +
 .../csvreaderstep/UnivocityCsvParser.java       |  176 +
 .../csvreaderstep/UnivocityCsvParserVo.java     |  184 +
 .../csvreaderstep/step-attributes.xml           |  229 +
 .../dataprocessor/DataProcessTaskStatus.java    |  286 +
 .../dataprocessor/IDataProcessStatus.java       |  192 +
 .../manager/CarbonDataProcessorManager.java     |   69 +
 .../processing/dataprocessor/queue/Queue.java   |   47 +
 .../queue/impl/DataProcessorQueue.java          |  106 +
 .../queue/impl/RecordComparator.java            |   45 +
 .../holder/DataProcessorRecordHolder.java       |   65 +
 .../processing/datatypes/ArrayDataType.java     |  289 +
 .../processing/datatypes/GenericDataType.java   |  154 +
 .../processing/datatypes/PrimitiveDataType.java |  268 +
 .../processing/datatypes/StructDataType.java    |  316 +
 .../processing/etl/DataLoadingException.java    |   52 +
 .../exception/CarbonDataProcessorException.java |   80 +
 .../graphgenerator/GraphGenerator.java          |  994 +++
 .../graphgenerator/GraphGeneratorConstants.java |   76 +
 .../graphgenerator/GraphGeneratorException.java |   81 +
 .../configuration/GraphConfigurationInfo.java   | 1061 +++
 .../processing/iterator/CarbonIterator.java     |   38 +
 .../processing/mdkeygen/MDKeyGenStep.java       |  527 ++
 .../processing/mdkeygen/MDKeyGenStepData.java   |   39 +
 .../processing/mdkeygen/MDKeyGenStepMeta.java   |  582 ++
 .../processing/mdkeygen/file/FileData.java      |   74 +
 .../processing/mdkeygen/file/FileManager.java   |   68 +
 .../mdkeygen/file/IFileManagerComposite.java    |   59 +
 .../mdkeygen/messages/messages_en_US.properties |   22 +
 .../merger/exeception/SliceMergerException.java |   80 +
 .../merger/step/CarbonSliceMergerStep.java      |  161 +
 .../merger/step/CarbonSliceMergerStepData.java  |   43 +
 .../merger/step/CarbonSliceMergerStepMeta.java  |  586 ++
 .../schema/metadata/ArrayWrapper.java           |   64 +
 .../schema/metadata/ColumnSchemaDetails.java    |  102 +
 .../metadata/ColumnSchemaDetailsWrapper.java    |  110 +
 .../processing/schema/metadata/ColumnsInfo.java |  504 ++
 .../schema/metadata/HierarchiesInfo.java        |  130 +
 .../schema/metadata/SortObserver.java           |   42 +
 .../processing/schema/metadata/TableOption.java |   82 +
 .../schema/metadata/TableOptionWrapper.java     |  106 +
 .../CarbonSortKeyAndGroupByException.java       |   89 +
 .../sortdata/AbstractTempSortFileReader.java    |  141 +
 .../sortdata/AbstractTempSortFileWriter.java    |  102 +
 .../sortdata/CompressedTempSortFileReader.java  |   52 +
 .../sortdata/CompressedTempSortFileWriter.java  |   79 +
 .../sortdata/FileMergerParameters.java          |  216 +
 .../sortdata/IntermediateFileMerger.java        |  371 +
 .../sortandgroupby/sortdata/RowComparator.java  |   96 +
 .../sortdata/RowComparatorForNormalDims.java    |   64 +
 .../sortandgroupby/sortdata/SortDataRows.java   |  616 ++
 .../sortdata/SortTempFileChunkHolder.java       |  519 ++
 .../sortdata/SortTempFileChunkWriter.java       |   77 +
 .../sortdata/TempSortFileReader.java            |   39 +
 .../sortdata/TempSortFileReaderFactory.java     |   45 +
 .../sortdata/TempSortFileWriter.java            |   48 +
 .../sortdata/TempSortFileWriterFactory.java     |   43 +
 .../UnCompressedTempSortFileReader.java         |   50 +
 .../UnCompressedTempSortFileWriter.java         |  114 +
 .../sortdatastep/SortKeyStep.java               |  277 +
 .../sortdatastep/SortKeyStepMeta.java           |  550 ++
 .../sortdatastep/SortKeyStepData.java           |   53 +
 .../store/CarbonDataFileAttributes.java         |   90 +
 .../store/CarbonFactDataHandlerColumnar.java    | 1312 +++
 .../store/CarbonFactDataHandlerModel.java       |  393 +
 .../processing/store/CarbonFactHandler.java     |   32 +
 .../processing/store/CarbonKeyBlockHolder.java  |   48 +
 .../store/SingleThreadFinalSortFilesMerger.java |  288 +
 .../store/colgroup/ColGroupBlockStorage.java    |   98 +
 .../store/colgroup/ColGroupDataHolder.java      |  103 +
 .../store/colgroup/ColGroupMinMax.java          |  217 +
 .../store/colgroup/ColumnDataHolder.java        |   40 +
 .../processing/store/colgroup/DataHolder.java   |   40 +
 .../store/messages/messages_en_US.properties    |   22 +
 .../store/writer/AbstractFactDataWriter.java    |  698 ++
 .../store/writer/CarbonFactDataWriter.java      |   89 +
 ...actDataWriterImplForIntIndexAndAggBlock.java |  487 ++
 .../processing/store/writer/NodeHolder.java     |  456 +
 .../exception/CarbonDataWriterException.java    |   81 +
 .../csvbased/BadRecordslogger.java              |  169 +
 .../CarbonCSVBasedDimSurrogateKeyGen.java       |  513 ++
 .../csvbased/CarbonCSVBasedSeqGenData.java      |  173 +
 .../csvbased/CarbonCSVBasedSeqGenMeta.java      | 1707 ++++
 .../csvbased/CarbonCSVBasedSeqGenStep.java      | 1869 ++++
 .../FileStoreSurrogateKeyGenForCSV.java         |  402 +
 .../dbbased/CarbonDimSurrogateKeyGen.java       |  286 +
 .../dbbased/HierarchyValueWriter.java           |  155 +
 .../dbbased/IntArrayWrapper.java                |  110 +
 .../dbbased/messages/messages_en_US.properties  |   61 +
 .../util/CarbonDataProcessorUtil.java           |  284 +
 .../processing/util/CarbonSchemaParser.java     | 1281 +++
 .../processing/util/RemoveDictionaryUtil.java   |  426 +
 .../fileoperations/AtomicFileOperations.java    |   33 -
 .../AtomicFileOperationsImpl.java               |   87 -
 .../lcm/fileoperations/FileWriteOperation.java  |   25 -
 .../lcm/locks/AbstractCarbonLock.java           |   77 -
 .../carbondata/lcm/locks/CarbonLockFactory.java |   94 -
 .../org/carbondata/lcm/locks/HdfsFileLock.java  |  106 -
 .../org/carbondata/lcm/locks/ICarbonLock.java   |   40 -
 .../org/carbondata/lcm/locks/LocalFileLock.java |  159 -
 .../org/carbondata/lcm/locks/LockUsage.java     |   31 -
 .../carbondata/lcm/locks/ZooKeeperLocking.java  |  195 -
 .../org/carbondata/lcm/locks/ZookeeperInit.java |   82 -
 .../lcm/status/SegmentStatusManager.java        |  500 --
 .../api/dataloader/DataLoadModel.java           |  201 -
 .../processing/api/dataloader/SchemaInfo.java   |  191 -
 .../constants/DataProcessorConstants.java       |   59 -
 .../processing/csvload/DataGraphExecuter.java   |  635 --
 .../processing/csvload/GraphExecutionUtil.java  |  362 -
 .../processing/csvreaderstep/CsvInput.java      |  528 --
 .../processing/csvreaderstep/CsvInputData.java  |   49 -
 .../processing/csvreaderstep/CsvInputMeta.java  |  937 --
 .../csvreaderstep/CustomDataStream.java         |  126 -
 .../csvreaderstep/UnivocityCsvParser.java       |  176 -
 .../csvreaderstep/UnivocityCsvParserVo.java     |  184 -
 .../csvreaderstep/step-attributes.xml           |  229 -
 .../dataprocessor/DataProcessTaskStatus.java    |  286 -
 .../dataprocessor/IDataProcessStatus.java       |  192 -
 .../manager/CarbonDataProcessorManager.java     |   69 -
 .../processing/dataprocessor/queue/Queue.java   |   47 -
 .../queue/impl/DataProcessorQueue.java          |  106 -
 .../queue/impl/RecordComparator.java            |   45 -
 .../holder/DataProcessorRecordHolder.java       |   65 -
 .../processing/datatypes/ArrayDataType.java     |  289 -
 .../processing/datatypes/GenericDataType.java   |  154 -
 .../processing/datatypes/PrimitiveDataType.java |  268 -
 .../processing/datatypes/StructDataType.java    |  316 -
 .../processing/etl/DataLoadingException.java    |   52 -
 .../exception/CarbonDataProcessorException.java |   80 -
 .../graphgenerator/GraphGenerator.java          |  987 ---
 .../graphgenerator/GraphGeneratorConstants.java |   76 -
 .../graphgenerator/GraphGeneratorException.java |   81 -
 .../configuration/GraphConfigurationInfo.java   | 1061 ---
 .../processing/iterator/CarbonIterator.java     |   38 -
 .../processing/mdkeygen/MDKeyGenStep.java       |  527 --
 .../processing/mdkeygen/MDKeyGenStepData.java   |   39 -
 .../processing/mdkeygen/MDKeyGenStepMeta.java   |  582 --
 .../processing/mdkeygen/file/FileData.java      |   74 -
 .../processing/mdkeygen/file/FileManager.java   |   68 -
 .../mdkeygen/file/IFileManagerComposite.java    |   59 -
 .../mdkeygen/messages/messages_en_US.properties |   22 -
 .../merger/exeception/SliceMergerException.java |   80 -
 .../merger/step/CarbonSliceMergerStep.java      |  161 -
 .../merger/step/CarbonSliceMergerStepData.java  |   43 -
 .../merger/step/CarbonSliceMergerStepMeta.java  |  586 --
 .../schema/metadata/ArrayWrapper.java           |   64 -
 .../schema/metadata/ColumnSchemaDetails.java    |  102 -
 .../metadata/ColumnSchemaDetailsWrapper.java    |  110 -
 .../processing/schema/metadata/ColumnsInfo.java |  504 --
 .../schema/metadata/HierarchiesInfo.java        |  130 -
 .../schema/metadata/SortObserver.java           |   42 -
 .../processing/schema/metadata/TableOption.java |   82 -
 .../schema/metadata/TableOptionWrapper.java     |  106 -
 .../CarbonSortKeyAndGroupByException.java       |   89 -
 .../sortdata/AbstractTempSortFileReader.java    |  141 -
 .../sortdata/AbstractTempSortFileWriter.java    |  102 -
 .../sortdata/CompressedTempSortFileReader.java  |   52 -
 .../sortdata/CompressedTempSortFileWriter.java  |   79 -
 .../sortdata/FileMergerParameters.java          |  216 -
 .../sortdata/IntermediateFileMerger.java        |  371 -
 .../sortandgroupby/sortdata/RowComparator.java  |   96 -
 .../sortdata/RowComparatorForNormalDims.java    |   64 -
 .../sortandgroupby/sortdata/SortDataRows.java   |  616 --
 .../sortdata/SortTempFileChunkHolder.java       |  519 --
 .../sortdata/SortTempFileChunkWriter.java       |   77 -
 .../sortdata/TempSortFileReader.java            |   39 -
 .../sortdata/TempSortFileReaderFactory.java     |   45 -
 .../sortdata/TempSortFileWriter.java            |   48 -
 .../sortdata/TempSortFileWriterFactory.java     |   43 -
 .../UnCompressedTempSortFileReader.java         |   50 -
 .../UnCompressedTempSortFileWriter.java         |  114 -
 .../sortdatastep/SortKeyStep.java               |  276 -
 .../sortdatastep/SortKeyStepData.java           |   53 -
 .../sortdatastep/SortKeyStepMeta.java           |  549 --
 .../store/CarbonDataFileAttributes.java         |   90 -
 .../store/CarbonFactDataHandlerColumnar.java    | 1312 ---
 .../store/CarbonFactDataHandlerModel.java       |  393 -
 .../processing/store/CarbonFactHandler.java     |   32 -
 .../processing/store/CarbonKeyBlockHolder.java  |   48 -
 .../store/SingleThreadFinalSortFilesMerger.java |  288 -
 .../store/colgroup/ColGroupBlockStorage.java    |   98 -
 .../store/colgroup/ColGroupDataHolder.java      |  103 -
 .../store/colgroup/ColGroupMinMax.java          |  217 -
 .../store/colgroup/ColumnDataHolder.java        |   40 -
 .../processing/store/colgroup/DataHolder.java   |   40 -
 .../store/messages/messages_en_US.properties    |   22 -
 .../store/writer/AbstractFactDataWriter.java    |  698 --
 .../store/writer/CarbonFactDataWriter.java      |   89 -
 ...actDataWriterImplForIntIndexAndAggBlock.java |  487 --
 .../processing/store/writer/NodeHolder.java     |  456 -
 .../exception/CarbonDataWriterException.java    |   81 -
 .../csvbased/BadRecordslogger.java              |  169 -
 .../CarbonCSVBasedDimSurrogateKeyGen.java       |  513 --
 .../csvbased/CarbonCSVBasedSeqGenData.java      |  173 -
 .../csvbased/CarbonCSVBasedSeqGenMeta.java      | 1707 ----
 .../csvbased/CarbonCSVBasedSeqGenStep.java      | 1869 ----
 .../FileStoreSurrogateKeyGenForCSV.java         |  402 -
 .../dbbased/CarbonDimSurrogateKeyGen.java       |  286 -
 .../dbbased/HierarchyValueWriter.java           |  155 -
 .../dbbased/IntArrayWrapper.java                |  110 -
 .../dbbased/messages/messages_en_US.properties  |   61 -
 .../util/CarbonDataProcessorUtil.java           |  284 -
 .../processing/util/CarbonSchemaParser.java     | 1281 ---
 .../processing/util/RemoveDictionaryUtil.java   |  426 -
 .../TimeStampDirectDictionaryGenerator_UT.java  |   75 +
 .../carbondata/lcm/locks/LocalFileLockTest.java |   63 +
 .../lcm/locks/ZooKeeperLockingTest.java         |  143 +
 .../store/colgroup/ColGroupMinMaxTest.java      |  253 +
 .../TimeStampDirectDictionaryGenerator_UT.java  |   75 -
 .../carbondata/lcm/locks/LocalFileLockTest.java |   66 -
 .../lcm/locks/ZooKeeperLockingTest.java         |  143 -
 .../store/colgroup/ColGroupMinMaxTest.java      |  257 -
 1398 files changed, 134329 insertions(+), 134344 deletions(-)
----------------------------------------------------------------------



[46/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/TableTaskInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/TableTaskInfo.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/TableTaskInfo.java
new file mode 100644
index 0000000..1f8caf0
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/TableTaskInfo.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.block;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * This class is responsible for maintaining the mapping of a task to its blocks.
+ */
+public class TableTaskInfo extends Distributable {
+
+  private final List<TableBlockInfo> tableBlockInfoList;
+  private final String taskId;
+
+  public String getTaskId() {
+    return taskId;
+  }
+
+  public List<TableBlockInfo> getTableBlockInfoList() {
+    return tableBlockInfoList;
+  }
+
+  public TableTaskInfo(String taskId, List<TableBlockInfo> tableBlockInfoList){
+    this.taskId = taskId;
+    this.tableBlockInfoList = tableBlockInfoList;
+  }
+
+  @Override public String[] getLocations() {
+    List<String> nodes = TableTaskInfo.maxNoNodes(tableBlockInfoList);
+    return nodes.toArray(new String[nodes.size()]);
+  }
+
+  @Override public int compareTo(Distributable o) {
+    return taskId.compareTo(((TableTaskInfo)o).getTaskId());
+  }
+
+  /**
+   * Finds the node that hosts the maximum number of blocks in the given list.
+   * @param blockList blocks to inspect
+   * @return the node(s) hosting the most blocks; all nodes when the counts are equal
+   */
+  public static List<String> maxNoNodes(List<TableBlockInfo> blockList) {
+    boolean useIndex = true;
+    Integer maxOccurence = 0;
+    String maxNode = null;
+    Map<String, Integer> nodeAndOccurenceMapping = new TreeMap<>();
+
+    // populate the map of node to the number of occurrences of that node.
+    for (TableBlockInfo block : blockList) {
+      for (String node : block.getLocations()) {
+        Integer nodeOccurence = nodeAndOccurenceMapping.get(node);
+        if (null == nodeOccurence) {
+          nodeAndOccurenceMapping.put(node, 1);
+        } else {
+          nodeAndOccurenceMapping.put(node, nodeOccurence + 1);
+        }
+      }
+    }
+    Integer previousValueOccurence = null;
+
+    // check which node occurs the maximum number of times.
+    for (Map.Entry<String, Integer> entry : nodeAndOccurenceMapping.entrySet()) {
+      // finding the maximum node.
+      if (entry.getValue() > maxOccurence) {
+        maxOccurence = entry.getValue();
+        maxNode = entry.getKey();
+      }
+      // first time scenario: initializing the previous value.
+      if (null == previousValueOccurence) {
+        previousValueOccurence = entry.getValue();
+      } else {
+        // for the case where all the nodes have the same number of blocks,
+        // we need to return the complete list instead of the max node.
+        if (!previousValueOccurence.equals(entry.getValue())) {
+          useIndex = false;
+        }
+      }
+    }
+
+    // if all the nodes have equal occurrence counts then return the complete key set.
+    if (useIndex) {
+      return new ArrayList<>(nodeAndOccurenceMapping.keySet());
+    }
+
+    // if any max node is found then returning the max node.
+    List<String> node =  new ArrayList<>(1);
+    node.add(maxNode);
+    return node;
+  }
+}
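
The node selection in maxNoNodes above counts how often each node hosts a block, returns the single busiest node, and falls back to every node when all counts are equal. A standalone sketch of the same strategy, not part of the commit; the class name and node strings below are illustrative only:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class NodeOccurrenceDemo {
  // Mirrors TableTaskInfo.maxNoNodes: count how often each node hosts a block,
  // return the single busiest node, or every node when the counts are all equal.
  static List<String> busiestNodes(String[][] blockLocations) {
    Map<String, Integer> counts = new TreeMap<>();
    for (String[] locations : blockLocations) {
      for (String node : locations) {
        Integer c = counts.get(node);
        counts.put(node, c == null ? 1 : c + 1);
      }
    }
    int max = 0;
    String maxNode = null;
    boolean allEqual = true;
    Integer previous = null;
    for (Map.Entry<String, Integer> e : counts.entrySet()) {
      if (e.getValue() > max) {
        max = e.getValue();
        maxNode = e.getKey();
      }
      if (previous != null && !previous.equals(e.getValue())) {
        allEqual = false;
      }
      previous = e.getValue();
    }
    if (allEqual) {
      return new ArrayList<>(counts.keySet());
    }
    List<String> result = new ArrayList<>(1);
    result.add(maxNode);
    return result;
  }

  public static void main(String[] args) {
    // node1 hosts two blocks, node2 one -> [node1]
    System.out.println(busiestNodes(new String[][] {{"node1", "node2"}, {"node1"}}));
    // each node hosts exactly one block -> [node1, node2]
    System.out.println(busiestNodes(new String[][] {{"node1"}, {"node2"}}));
  }
}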

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/TaskBlockInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/TaskBlockInfo.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/TaskBlockInfo.java
new file mode 100644
index 0000000..1221cc1
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/block/TaskBlockInfo.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.carbon.datastore.block;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+/**
+ * This class contains blocks info of each task
+ */
+public class TaskBlockInfo {
+
+  // stores TableBlockInfo list of each task
+  private Map<String, List<TableBlockInfo>> taskBlockInfoMapping;
+
+  public TaskBlockInfo(){
+
+    taskBlockInfoMapping = new HashMap<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+  }
+
+  /**
+   * returns the set of task ids
+   * @return set of task ids
+   */
+  public Set<String> getTaskSet() {
+    return taskBlockInfoMapping.keySet();
+  }
+
+
+  /**
+   * returns the TableBlockInfo list of the given task
+   * @param task task id
+   * @return blocks belonging to the task
+   */
+  public List<TableBlockInfo> getTableBlockInfoList(String task) {
+    return taskBlockInfoMapping.get(task);
+  }
+
+  /**
+   * maps the TableBlockInfo list to its respective task
+   * @param task task id
+   * @param tableBlockInfoList blocks belonging to the task
+   */
+  public void addTableBlockInfoList(String task, List<TableBlockInfo> tableBlockInfoList) {
+    taskBlockInfoMapping.put(task, tableBlockInfoList);
+  }
+
+}
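
TaskBlockInfo is a thin wrapper over a task-id-to-blocks map; a minimal usage sketch, assuming the two classes above are on the classpath (task ids are hypothetical and the block lists are left empty for brevity):

import java.util.ArrayList;
import java.util.List;

public class TaskBlockInfoDemo {
  public static void main(String[] args) {
    TaskBlockInfo info = new TaskBlockInfo();
    // register the blocks belonging to each task (left empty here)
    info.addTableBlockInfoList("task_0", new ArrayList<TableBlockInfo>());
    info.addTableBlockInfoList("task_1", new ArrayList<TableBlockInfo>());
    for (String task : info.getTaskSet()) {
      List<TableBlockInfo> blocks = info.getTableBlockInfoList(task);
      System.out.println(task + " -> " + blocks.size() + " block(s)");
    }
  }
}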

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/DimensionChunkAttributes.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/DimensionChunkAttributes.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/DimensionChunkAttributes.java
new file mode 100644
index 0000000..4dcf083
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/DimensionChunkAttributes.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.chunk;
+
+/**
+ * Dimension chunk attributes which holds all the
+ * property about the dimension chunk data
+ */
+public class DimensionChunkAttributes {
+
+  /**
+   * inverted index of the data
+   */
+  private int[] invertedIndexes;
+
+  /**
+   * reverse index of the data
+   */
+  private int[] invertedIndexesReverse;
+
+  /**
+   * each row size
+   */
+  private int columnValueSize;
+
+  /**
+   * is no dictionary
+   */
+  private boolean isNoDictionary;
+
+  /**
+   * @return the invertedIndexes
+   */
+  public int[] getInvertedIndexes() {
+    return invertedIndexes;
+  }
+
+  /**
+   * @param invertedIndexes the invertedIndexes to set
+   */
+  public void setInvertedIndexes(int[] invertedIndexes) {
+    this.invertedIndexes = invertedIndexes;
+  }
+
+  /**
+   * @return the invertedIndexesReverse
+   */
+  public int[] getInvertedIndexesReverse() {
+    return invertedIndexesReverse;
+  }
+
+  /**
+   * @param invertedIndexesReverse the invertedIndexesReverse to set
+   */
+  public void setInvertedIndexesReverse(int[] invertedIndexesReverse) {
+    this.invertedIndexesReverse = invertedIndexesReverse;
+  }
+
+  /**
+   * @return the columnValueSize
+   */
+  public int getColumnValueSize() {
+    return columnValueSize;
+  }
+
+  /**
+   * @param eachRowSize the eachRowSize to set
+   */
+  public void setEachRowSize(int eachRowSize) {
+    this.columnValueSize = eachRowSize;
+  }
+
+  /**
+   * @return the isNoDictionary
+   */
+  public boolean isNoDictionary() {
+    return isNoDictionary;
+  }
+
+  /**
+   * @param isNoDictionary the isNoDictionary to set
+   */
+  public void setNoDictionary(boolean isNoDictionary) {
+    this.isNoDictionary = isNoDictionary;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/DimensionColumnDataChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/DimensionColumnDataChunk.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/DimensionColumnDataChunk.java
new file mode 100644
index 0000000..ddc76c0
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/DimensionColumnDataChunk.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.chunk;
+
+import org.apache.carbondata.scan.executor.infos.KeyStructureInfo;
+
+/**
+ * Interface for dimension column chunk.
+ */
+public interface DimensionColumnDataChunk<T> {
+
+  /**
+   * Below method will be used to fill the data based on offset and row id
+   *
+   * @param data        data to be filled
+   * @param offset      offset from which the data needs to be filled
+   * @param columnIndex row id of the chunk
+   * @param restructuringInfo structure of the key
+   * @return how many bytes were copied
+   */
+  int fillChunkData(byte[] data, int offset, int columnIndex, KeyStructureInfo restructuringInfo);
+
+  /**
+   * Converts the column data to its dictionary integer value and fills the row
+   * @param rowId row id of the data
+   * @param columnIndex index at which the value is filled into the row
+   * @param row output row
+   * @param restructuringInfo structure of the key
+   * @return next column index
+   */
+  int fillConvertedChunkData(int rowId, int columnIndex, int[] row,
+      KeyStructureInfo restructuringInfo);
+
+  /**
+   * Below method to get the data based on row id
+   *
+   * @param columnIndex row id of the data
+   * @return chunk
+   */
+  byte[] getChunkData(int columnIndex);
+
+  /**
+   * Below method will be used get the chunk attributes
+   *
+   * @return chunk attributes
+   */
+  DimensionChunkAttributes getAttributes();
+
+  /**
+   * Below method will be used to return the complete data chunk
+   * This will be required during filter query
+   *
+   * @return complete chunk
+   */
+  T getCompleteDataChunk();
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/MeasureColumnDataChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/MeasureColumnDataChunk.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/MeasureColumnDataChunk.java
new file mode 100644
index 0000000..fbe6e95
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/MeasureColumnDataChunk.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.chunk;
+
+import org.apache.carbondata.core.carbon.metadata.blocklet.datachunk.PresenceMeta;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+
+/**
+ * Holder for measure column chunk
+ * it will have data and its attributes which will
+ * be required for processing
+ */
+public class MeasureColumnDataChunk {
+
+  /**
+   * measure chunk
+   */
+  private CarbonReadDataHolder measureDataHolder;
+
+  /**
+   * holds the information about the null value indexes;
+   * this is helpful for removing null values
+   * while aggregating
+   */
+  private PresenceMeta nullValueIndexHolder;
+
+  /**
+   * @return the measureDataHolder
+   */
+  public CarbonReadDataHolder getMeasureDataHolder() {
+    return measureDataHolder;
+  }
+
+  /**
+   * @param measureDataHolder the measureDataHolder to set
+   */
+  public void setMeasureDataHolder(CarbonReadDataHolder measureDataHolder) {
+    this.measureDataHolder = measureDataHolder;
+  }
+
+  /**
+   * @return the nullValueIndexHolder
+   */
+  public PresenceMeta getNullValueIndexHolder() {
+    return nullValueIndexHolder;
+  }
+
+  /**
+   * @param nullValueIndexHolder the nullValueIndexHolder to set
+   */
+  public void setNullValueIndexHolder(PresenceMeta nullValueIndexHolder) {
+    this.nullValueIndexHolder = nullValueIndexHolder;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/impl/ColumnGroupDimensionDataChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/impl/ColumnGroupDimensionDataChunk.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/impl/ColumnGroupDimensionDataChunk.java
new file mode 100644
index 0000000..77fb163
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/impl/ColumnGroupDimensionDataChunk.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.chunk.impl;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionChunkAttributes;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.scan.executor.infos.KeyStructureInfo;
+
+/**
+ * This class is holder of the dimension column chunk data of the fixed length
+ * key size
+ */
+public class ColumnGroupDimensionDataChunk implements DimensionColumnDataChunk<byte[]> {
+
+  /**
+   * dimension chunk attributes
+   */
+  private DimensionChunkAttributes chunkAttributes;
+
+  /**
+   * data chunks
+   */
+  private byte[] dataChunk;
+
+  /**
+   * Constructor for this class
+   *
+   * @param dataChunk       data chunk
+   * @param chunkAttributes chunk attributes
+   */
+  public ColumnGroupDimensionDataChunk(byte[] dataChunk, DimensionChunkAttributes chunkAttributes) {
+    this.chunkAttributes = chunkAttributes;
+    this.dataChunk = dataChunk;
+  }
+
+  /**
+   * Below method will be used to fill the data based on offset and row id
+   *
+   * @param data             data to be filled
+   * @param offset           offset from which the data needs to be filled
+   * @param rowId            row id of the chunk
+   * @param restructuringInfo define the structure of the key
+   * @return how many bytes were copied
+   */
+  @Override public int fillChunkData(byte[] data, int offset, int rowId,
+      KeyStructureInfo restructuringInfo) {
+    byte[] maskedKey =
+        getMaskedKey(dataChunk, rowId * chunkAttributes.getColumnValueSize(), restructuringInfo);
+    System.arraycopy(maskedKey, 0, data, offset, maskedKey.length);
+    return maskedKey.length;
+  }
+
+  /**
+   * Converts to column dictionary integer value
+   */
+  @Override public int fillConvertedChunkData(int rowId, int columnIndex, int[] row,
+      KeyStructureInfo info) {
+    int start = rowId * chunkAttributes.getColumnValueSize();
+    long[] keyArray = info.getKeyGenerator().getKeyArray(dataChunk, start);
+    int[] ordinal = info.getMdkeyQueryDimensionOrdinal();
+    for (int i = 0; i < ordinal.length; i++) {
+      row[columnIndex++] = (int)keyArray[ordinal[i]];
+    }
+    return columnIndex;
+  }
+
+  /**
+   * Extracts the masked key: gathers the mask byte ranges from the data
+   * and ANDs each byte with the corresponding max key byte
+   */
+  public byte[] getMaskedKey(byte[] data, int offset, KeyStructureInfo info) {
+    byte[] maskedKey = new byte[info.getMaskByteRanges().length];
+    int counter = 0;
+    int byteRange = 0;
+    for (int i = 0; i < info.getMaskByteRanges().length; i++) {
+      byteRange = info.getMaskByteRanges()[i];
+      maskedKey[counter++] = (byte) (data[byteRange + offset] & info.getMaxKey()[byteRange]);
+    }
+    return maskedKey;
+  }
+
+  /**
+   * Below method to get the data based on row id
+   *
+   * @param rowId row id of the data
+   * @return chunk
+   */
+  @Override public byte[] getChunkData(int rowId) {
+    byte[] data = new byte[chunkAttributes.getColumnValueSize()];
+    System.arraycopy(dataChunk, rowId * data.length, data, 0, data.length);
+    return data;
+  }
+
+  /**
+   * Below method will be used to get the chunk attributes
+   *
+   * @return chunk attributes
+   */
+  @Override public DimensionChunkAttributes getAttributes() {
+    return chunkAttributes;
+  }
+
+  /**
+   * Below method will be used to return the complete data chunk
+   * This will be required during filter query
+   *
+   * @return complete chunk
+   */
+  @Override public byte[] getCompleteDataChunk() {
+    return dataChunk;
+  }
+}
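
getMaskedKey above gathers the bytes named by the mask byte ranges out of the stored mdkey and ANDs each with the corresponding max-key byte. A standalone sketch of the same gather-and-mask step, with invented values:

public class MaskedKeyDemo {
  // Mirrors ColumnGroupDimensionDataChunk.getMaskedKey: pick the bytes named in
  // maskByteRanges out of the mdkey and AND each with the matching max-key byte.
  static byte[] maskedKey(byte[] data, int offset, int[] maskByteRanges, byte[] maxKey) {
    byte[] masked = new byte[maskByteRanges.length];
    int counter = 0;
    for (int byteRange : maskByteRanges) {
      masked[counter++] = (byte) (data[byteRange + offset] & maxKey[byteRange]);
    }
    return masked;
  }

  public static void main(String[] args) {
    byte[] mdkey = {0x12, 0x34, 0x56, 0x78};
    int[] ranges = {1, 3};               // only bytes 1 and 3 belong to the queried dimensions
    byte[] maxKey = {0, (byte) 0xFF, 0, 0x0F};
    byte[] out = maskedKey(mdkey, 0, ranges, maxKey);
    System.out.printf("%02x %02x%n", out[0], out[1]);  // prints: 34 08
  }
}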

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/impl/FixedLengthDimensionDataChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/impl/FixedLengthDimensionDataChunk.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/impl/FixedLengthDimensionDataChunk.java
new file mode 100644
index 0000000..2867d76
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/impl/FixedLengthDimensionDataChunk.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.chunk.impl;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionChunkAttributes;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.scan.executor.infos.KeyStructureInfo;
+
+/**
+ * This class is holder of the dimension column chunk data of the fixed length
+ * key size
+ */
+public class FixedLengthDimensionDataChunk implements DimensionColumnDataChunk<byte[]> {
+
+  /**
+   * dimension chunk attributes
+   */
+  private DimensionChunkAttributes chunkAttributes;
+
+  /**
+   * data chunks
+   */
+  private byte[] dataChunk;
+
+  /**
+   * Constructor for this class
+   *
+   * @param dataChunk       data chunk
+   * @param chunkAttributes chunk attributes
+   */
+  public FixedLengthDimensionDataChunk(byte[] dataChunk, DimensionChunkAttributes chunkAttributes) {
+    this.chunkAttributes = chunkAttributes;
+    this.dataChunk = dataChunk;
+  }
+
+  /**
+   * Below method will be used to fill the data based on offset and row id
+   *
+   * @param data             data to be filled
+   * @param offset           offset from which the data needs to be filled
+   * @param index            row id of the chunk
+   * @param keyStructureInfo define the structure of the key
+   * @return how many bytes were copied
+   */
+  @Override public int fillChunkData(byte[] data, int offset, int index,
+      KeyStructureInfo keyStructureInfo) {
+    if (chunkAttributes.getInvertedIndexes() != null) {
+      index = chunkAttributes.getInvertedIndexesReverse()[index];
+    }
+    System.arraycopy(dataChunk, index * chunkAttributes.getColumnValueSize(), data, offset,
+        chunkAttributes.getColumnValueSize());
+    return chunkAttributes.getColumnValueSize();
+  }
+
+  /**
+   * Converts to column dictionary integer value
+   */
+  @Override public int fillConvertedChunkData(int rowId, int columnIndex, int[] row,
+      KeyStructureInfo restructuringInfo) {
+    if (chunkAttributes.getInvertedIndexes() != null) {
+      rowId = chunkAttributes.getInvertedIndexesReverse()[rowId];
+    }
+    int start = rowId * chunkAttributes.getColumnValueSize();
+    int dict = 0;
+    for (int i = start; i < start + chunkAttributes.getColumnValueSize(); i++) {
+      dict <<= 8;
+      dict ^= dataChunk[i] & 0xFF;
+    }
+    row[columnIndex] = dict;
+    return columnIndex + 1;
+  }
+
+  /**
+   * Below method to get the data based on row id
+   *
+   * @param index row id of the data
+   * @return chunk
+   */
+  @Override public byte[] getChunkData(int index) {
+    byte[] data = new byte[chunkAttributes.getColumnValueSize()];
+    if (chunkAttributes.getInvertedIndexes() != null) {
+      index = chunkAttributes.getInvertedIndexesReverse()[index];
+    }
+    System.arraycopy(dataChunk, index * chunkAttributes.getColumnValueSize(), data, 0,
+        chunkAttributes.getColumnValueSize());
+    return data;
+  }
+
+  /**
+   * Below method will be used to get the chunk attributes
+   *
+   * @return chunk attributes
+   */
+  @Override public DimensionChunkAttributes getAttributes() {
+    return chunkAttributes;
+  }
+
+  /**
+   * Below method will be used to return the complete data chunk
+   * This will be required during filter query
+   *
+   * @return complete chunk
+   */
+  @Override public byte[] getCompleteDataChunk() {
+    return dataChunk;
+  }
+}
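
The shift/XOR loop in fillConvertedChunkData above is a big-endian byte-to-int conversion of one fixed-length stored value. A standalone sketch of the same loop (the class name and bytes are invented):

public class DictBytesDemo {
  // Same shift/XOR loop as FixedLengthDimensionDataChunk.fillConvertedChunkData:
  // interprets columnValueSize bytes as an unsigned big-endian integer.
  static int toDictValue(byte[] dataChunk, int start, int columnValueSize) {
    int dict = 0;
    for (int i = start; i < start + columnValueSize; i++) {
      dict <<= 8;
      dict ^= dataChunk[i] & 0xFF;
    }
    return dict;
  }

  public static void main(String[] args) {
    byte[] chunk = {0x00, 0x01, 0x2C};             // one 3-byte value
    System.out.println(toDictValue(chunk, 0, 3));  // prints: 300
  }
}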

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/impl/VariableLengthDimensionDataChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/impl/VariableLengthDimensionDataChunk.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/impl/VariableLengthDimensionDataChunk.java
new file mode 100644
index 0000000..6d2d400
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/impl/VariableLengthDimensionDataChunk.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.chunk.impl;
+
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionChunkAttributes;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.scan.executor.infos.KeyStructureInfo;
+
+/**
+ * This class is holder of the dimension column chunk data of the variable
+ * length key size
+ */
+public class VariableLengthDimensionDataChunk implements DimensionColumnDataChunk<List<byte[]>> {
+
+  /**
+   * dimension chunk attributes
+   */
+  private DimensionChunkAttributes chunkAttributes;
+
+  /**
+   * data chunk
+   */
+  private List<byte[]> dataChunk;
+
+  /**
+   * Constructor for this class
+   *
+   * @param dataChunk       data chunk
+   * @param chunkAttributes chunk attributes
+   */
+  public VariableLengthDimensionDataChunk(List<byte[]> dataChunk,
+      DimensionChunkAttributes chunkAttributes) {
+    this.chunkAttributes = chunkAttributes;
+    this.dataChunk = dataChunk;
+  }
+
+  /**
+   * Below method will be used to fill the data based on offset and row id
+   *
+   * @param data             data to be filled
+   * @param offset           offset from which the data needs to be filled
+   * @param index            row id of the chunk
+   * @param restructuringInfo define the structure of the key
+   * @return how many bytes were copied
+   */
+  @Override public int fillChunkData(byte[] data, int offset, int index,
+      KeyStructureInfo restructuringInfo) {
+    // not required in this case because this column chunk is not part of
+    // the mdkey
+    return 0;
+  }
+
+  /**
+   * Converts the column data to its dictionary integer value
+   * @param rowId row id of the data
+   * @param columnIndex index at which the value would be filled into the row
+   * @param row output row
+   * @param restructuringInfo structure of the key
+   * @return next column index
+   */
+  @Override public int fillConvertedChunkData(int rowId, int columnIndex, int[] row,
+      KeyStructureInfo restructuringInfo) {
+    return columnIndex + 1;
+  }
+
+  /**
+   * Below method to get the data based on row id
+   *
+   * @param index row id of the data
+   * @return chunk
+   */
+  @Override public byte[] getChunkData(int index) {
+    if (null != chunkAttributes.getInvertedIndexes()) {
+      index = chunkAttributes.getInvertedIndexesReverse()[index];
+    }
+    return dataChunk.get(index);
+  }
+
+  /**
+   * Below method will be used to get the chunk attributes
+   *
+   * @return chunk attributes
+   */
+  @Override public DimensionChunkAttributes getAttributes() {
+    return chunkAttributes;
+  }
+
+  /**
+   * Below method will be used to return the complete data chunk
+   * This will be required during filter query
+   *
+   * @return complete chunk
+   */
+  @Override public List<byte[]> getCompleteDataChunk() {
+    return dataChunk;
+  }
+}
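
Both the fixed and variable length chunks redirect a logical row id through invertedIndexesReverse before touching the stored (sorted) data. A standalone sketch of that indirection, with invented index values, mirroring getInvertedReverseIndex from AbstractChunkReader further below:

public class InvertedIndexDemo {
  public static void main(String[] args) {
    // invertedIndex[sortedPos] = original row id stored at that sorted position
    int[] invertedIndex = {2, 0, 1};
    // build the reverse mapping, as AbstractChunkReader.getInvertedReverseIndex does
    int[] reverse = new int[invertedIndex.length];
    for (int i = 0; i < invertedIndex.length; i++) {
      reverse[invertedIndex[i]] = i;
    }
    String[] sortedData = {"row2-bytes", "row0-bytes", "row1-bytes"};
    int rowId = 0;
    // getChunkData(rowId) first redirects through the reverse index
    System.out.println(sortedData[reverse[rowId]]);  // prints: row0-bytes
  }
}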

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/DimensionColumnChunkReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/DimensionColumnChunkReader.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/DimensionColumnChunkReader.java
new file mode 100644
index 0000000..b958245
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/DimensionColumnChunkReader.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.chunk.reader;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+/**
+ * Interface for reading a dimension data chunk.
+ * Its concrete implementations can be used to read the chunk,
+ * whether compressed or uncompressed.
+ */
+public interface DimensionColumnChunkReader {
+
+  /**
+   * Below method will be used to read the chunk based on block indexes
+   *
+   * @param fileReader   file reader to read the blocks from file
+   * @param blockIndexes blocks to be read
+   * @return dimension column chunks
+   */
+  DimensionColumnDataChunk[] readDimensionChunks(FileHolder fileReader, int... blockIndexes);
+
+  /**
+   * Below method will be used to read the chunk based on block index
+   *
+   * @param fileReader file reader to read the blocks from file
+   * @param blockIndex block to be read
+   * @return dimension column chunk
+   */
+  DimensionColumnDataChunk readDimensionChunk(FileHolder fileReader, int blockIndex);
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/MeasureColumnChunkReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/MeasureColumnChunkReader.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/MeasureColumnChunkReader.java
new file mode 100644
index 0000000..8a7c8ef
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/MeasureColumnChunkReader.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.chunk.reader;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+/**
+ * Reader interface for reading the measure blocks from file
+ */
+public interface MeasureColumnChunkReader {
+
+  /**
+   * Method to read the blocks data based on block indexes
+   *
+   * @param fileReader   file reader to read the blocks
+   * @param blockIndexes blocks to be read
+   * @return measure data chunks
+   */
+  MeasureColumnDataChunk[] readMeasureChunks(FileHolder fileReader, int... blockIndexes);
+
+  /**
+   * Method to read the blocks data based on block index
+   *
+   * @param fileReader file reader to read the blocks
+   * @param blockIndex block to be read
+   * @return measure data chunk
+   */
+  MeasureColumnDataChunk readMeasureChunk(FileHolder fileReader, int blockIndex);
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/dimension/AbstractChunkReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/dimension/AbstractChunkReader.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/dimension/AbstractChunkReader.java
new file mode 100644
index 0000000..59dcd38
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/dimension/AbstractChunkReader.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.chunk.reader.dimension;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.reader.DimensionColumnChunkReader;
+import org.apache.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.compression.Compressor;
+import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression;
+import org.apache.carbondata.core.keygenerator.mdkey.NumberCompressor;
+import org.apache.carbondata.core.util.CarbonProperties;
+
+/**
+ * Class which will have all the common properties and behavior among all type
+ * of reader
+ */
+public abstract class AbstractChunkReader implements DimensionColumnChunkReader {
+
+  /**
+   * compressor will be used to uncompress the data
+   */
+  protected static final Compressor<byte[]> COMPRESSOR =
+      SnappyCompression.SnappyByteCompression.INSTANCE;
+
+  /**
+   * data chunk list which holds the information
+   * about the data block metadata
+   */
+  protected List<DataChunk> dimensionColumnChunk;
+
+  /**
+   * size of each column value;
+   * for a no dictionary column it will be -1
+   */
+  protected int[] eachColumnValueSize;
+
+  /**
+   * full qualified path of the data file from
+   * which data will be read
+   */
+  protected String filePath;
+
+  /**
+   * this will be used to uncompress the
+   * row id and rle chunk
+   */
+  protected NumberCompressor numberComressor;
+
+  /**
+   * number of element in each chunk
+   */
+  private int numberOfElement;
+
+  /**
+   * Constructor taking the minimum parameters needed to create an
+   * instance of this class
+   *
+   * @param dimensionColumnChunk dimension chunk metadata
+   * @param eachColumnValueSize  size of each column value
+   * @param filePath             file from which data will be read
+   */
+  public AbstractChunkReader(List<DataChunk> dimensionColumnChunk, int[] eachColumnValueSize,
+      String filePath) {
+    this.dimensionColumnChunk = dimensionColumnChunk;
+    this.eachColumnValueSize = eachColumnValueSize;
+    this.filePath = filePath;
+    try {
+      this.numberOfElement = Integer.parseInt(CarbonProperties.getInstance()
+          .getProperty(CarbonCommonConstants.BLOCKLET_SIZE,
+              CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL));
+    } catch (NumberFormatException exception) {
+      this.numberOfElement = Integer.parseInt(CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL);
+    }
+    this.numberComressor = new NumberCompressor(this.numberOfElement);
+  }
+
+  /**
+   * Below method will be used to create the inverted index reverse
+   * this will be used to point to actual data in the chunk
+   *
+   * @param invertedIndex inverted index
+   * @return reverse inverted index
+   */
+  protected int[] getInvertedReverseIndex(int[] invertedIndex) {
+    int[] columnIndexTemp = new int[invertedIndex.length];
+
+    for (int i = 0; i < invertedIndex.length; i++) {
+      columnIndexTemp[invertedIndex[i]] = i;
+    }
+    return columnIndexTemp;
+  }
+
+  /**
+   * In case of a no dictionary column the size of each column value
+   * will not be the same, so for filter queries we cannot take
+   * advantage of binary search, because the length of each value is
+   * stored along with the data; converting this data to a two
+   * dimensional array format makes filter query processing faster
+   *
+   * @param dataChunkWithLength no dictionary column chunk in the layout
+   *                            <Length><Data><Length><Data>,
+   *                            where each length is stored in 2 bytes
+   * @return list of data chunks, one entry in the list per column value
+   */
+  protected List<byte[]> getNoDictionaryDataChunk(byte[] dataChunkWithLength) {
+    List<byte[]> dataChunk = new ArrayList<byte[]>(numberOfElement);
+    // wrapping the chunk to byte buffer
+    ByteBuffer buffer = ByteBuffer.wrap(dataChunkWithLength);
+    buffer.rewind();
+    byte[] data = null;
+    // iterating till all the elements are read
+    while (buffer.hasRemaining()) {
+      // as all the data is stored with length(2 bytes)
+      // first reading the size and then based on size
+      // we need to read the actual value
+      data = new byte[buffer.getShort()];
+      buffer.get(data);
+      dataChunk.add(data);
+    }
+    return dataChunk;
+  }
+}
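
The <Length><Data> layout that getNoDictionaryDataChunk parses can be round-tripped with a ByteBuffer. A standalone sketch (values invented) that writes two values in that layout and reads them back the same way the method above does:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

public class NoDictionaryChunkDemo {
  public static void main(String[] args) {
    byte[] a = "apple".getBytes(StandardCharsets.UTF_8);
    byte[] b = "kiwi".getBytes(StandardCharsets.UTF_8);

    // write: a 2-byte length followed by the value, repeated per row
    ByteBuffer out = ByteBuffer.allocate(2 + a.length + 2 + b.length);
    out.putShort((short) a.length).put(a).putShort((short) b.length).put(b);
    byte[] chunkWithLength = out.array();

    // read it back, mirroring AbstractChunkReader.getNoDictionaryDataChunk
    List<byte[]> values = new ArrayList<>();
    ByteBuffer in = ByteBuffer.wrap(chunkWithLength);
    while (in.hasRemaining()) {
      byte[] value = new byte[in.getShort()];
      in.get(value);
      values.add(value);
    }
    for (byte[] v : values) {
      System.out.println(new String(v, StandardCharsets.UTF_8));  // apple, then kiwi
    }
  }
}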

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/dimension/CompressedDimensionChunkFileBasedReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/dimension/CompressedDimensionChunkFileBasedReader.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/dimension/CompressedDimensionChunkFileBasedReader.java
new file mode 100644
index 0000000..209217b
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/dimension/CompressedDimensionChunkFileBasedReader.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.chunk.reader.dimension;
+
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionChunkAttributes;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.impl.ColumnGroupDimensionDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.impl.VariableLengthDimensionDataChunk;
+import org.apache.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.columnar.UnBlockIndexer;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+/**
+ * Compressed dimension chunk reader class
+ */
+public class CompressedDimensionChunkFileBasedReader extends AbstractChunkReader {
+
+  /**
+   * Constructor which takes the minimum parameters required to create an instance of this class
+   *
+   * @param dimensionColumnChunk dimension chunk metadata
+   * @param eachColumnValueSize  size of the each column value
+   * @param filePath             file from which data will be read
+   */
+  public CompressedDimensionChunkFileBasedReader(List<DataChunk> dimensionColumnChunk,
+      int[] eachColumnValueSize, String filePath) {
+    super(dimensionColumnChunk, eachColumnValueSize, filePath);
+  }
+
+  /**
+   * Below method will be used to read the chunk based on block indexes
+   *
+   * @param fileReader   file reader to read the blocks from file
+   * @param blockIndexes blocks to be read
+   * @return dimension column chunks
+   */
+  @Override public DimensionColumnDataChunk[] readDimensionChunks(FileHolder fileReader,
+      int... blockIndexes) {
+    // read the column chunk based on block index and add it to the result array
+    DimensionColumnDataChunk[] dataChunks =
+        new DimensionColumnDataChunk[dimensionColumnChunk.size()];
+    for (int i = 0; i < blockIndexes.length; i++) {
+      dataChunks[blockIndexes[i]] = readDimensionChunk(fileReader, blockIndexes[i]);
+    }
+    return dataChunks;
+  }
+
+  /**
+   * Below method will be used to read the chunk based on block index
+   *
+   * @param fileReader file reader to read the blocks from file
+   * @param blockIndex block to be read
+   * @return dimension column chunk
+   */
+  @Override public DimensionColumnDataChunk readDimensionChunk(FileHolder fileReader,
+      int blockIndex) {
+    byte[] dataPage = null;
+    int[] invertedIndexes = null;
+    int[] invertedIndexesReverse = null;
+    int[] rlePage = null;
+
+    // first read the data and uncompress it
+    dataPage = COMPRESSOR.unCompress(fileReader
+        .readByteArray(filePath, dimensionColumnChunk.get(blockIndex).getDataPageOffset(),
+            dimensionColumnChunk.get(blockIndex).getDataPageLength()));
+    // if row id block is present then read the row id chunk and uncompress it
+    if (CarbonUtil.hasEncoding(dimensionColumnChunk.get(blockIndex).getEncodingList(),
+        Encoding.INVERTED_INDEX)) {
+      invertedIndexes = CarbonUtil
+          .getUnCompressColumnIndex(dimensionColumnChunk.get(blockIndex).getRowIdPageLength(),
+              fileReader.readByteArray(filePath,
+                  dimensionColumnChunk.get(blockIndex).getRowIdPageOffset(),
+                  dimensionColumnChunk.get(blockIndex).getRowIdPageLength()), numberComressor);
+      // get the reverse index
+      invertedIndexesReverse = getInvertedReverseIndex(invertedIndexes);
+    }
+    // if rle is applied then read the rle block chunk and then uncompress
+    // the actual data based on the rle block
+    if (CarbonUtil
+        .hasEncoding(dimensionColumnChunk.get(blockIndex).getEncodingList(), Encoding.RLE)) {
+      // read and uncompress the rle block
+      rlePage = numberComressor.unCompress(fileReader
+          .readByteArray(filePath, dimensionColumnChunk.get(blockIndex).getRlePageOffset(),
+              dimensionColumnChunk.get(blockIndex).getRlePageLength()));
+      // uncompress the data with rle indexes
+      dataPage = UnBlockIndexer.uncompressData(dataPage, rlePage, eachColumnValueSize[blockIndex]);
+      rlePage = null;
+    }
+    // fill chunk attributes
+    DimensionChunkAttributes chunkAttributes = new DimensionChunkAttributes();
+    chunkAttributes.setEachRowSize(eachColumnValueSize[blockIndex]);
+    chunkAttributes.setInvertedIndexes(invertedIndexes);
+    chunkAttributes.setInvertedIndexesReverse(invertedIndexesReverse);
+    DimensionColumnDataChunk columnDataChunk = null;
+
+    if (dimensionColumnChunk.get(blockIndex).isRowMajor()) {
+      // to store fixed length column chunk values
+      columnDataChunk = new ColumnGroupDimensionDataChunk(dataPage, chunkAttributes);
+    }
+    // if it is a no dictionary column then first create a no dictionary column chunk
+    // and set it to the data chunk instance
+    else if (!CarbonUtil
+        .hasEncoding(dimensionColumnChunk.get(blockIndex).getEncodingList(), Encoding.DICTIONARY)) {
+      columnDataChunk =
+          new VariableLengthDimensionDataChunk(getNoDictionaryDataChunk(dataPage), chunkAttributes);
+      chunkAttributes.setNoDictionary(true);
+    } else {
+      // to store fixed length column chunk values
+      columnDataChunk = new FixedLengthDimensionDataChunk(dataPage, chunkAttributes);
+    }
+    return columnDataChunk;
+  }
+
+}
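
The RLE branch above stores only distinct runs on disk and expands them at read time. The project's UnBlockIndexer does the real work; the following is only a hedged sketch of the general idea, assuming (purely for illustration) an rlePage laid out as (valueIndex, runLength) pairs over fixed-size values. The actual on-disk layout may differ.

// Hypothetical RLE expansion over fixed-size values; the (valueIndex,
// runLength) pair layout assumed for rlePage is illustrative only.
public static byte[] expandRle(byte[] dataPage, int[] rlePage, int eachValueSize) {
  int totalValues = 0;
  for (int i = 1; i < rlePage.length; i += 2) {
    totalValues += rlePage[i];
  }
  byte[] out = new byte[totalValues * eachValueSize];
  int outPos = 0;
  for (int i = 0; i < rlePage.length; i += 2) {
    int srcOffset = rlePage[i] * eachValueSize;
    for (int run = 0; run < rlePage[i + 1]; run++) {
      // copy the run's value once per repetition
      System.arraycopy(dataPage, srcOffset, out, outPos, eachValueSize);
      outPos += eachValueSize;
    }
  }
  return out;
}

For example, with eachValueSize = 1, dataPage = {7, 9} and rlePage = {0, 3, 1, 2}, this yields {7, 7, 7, 9, 9}.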

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/measure/AbstractMeasureChunkReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/measure/AbstractMeasureChunkReader.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/measure/AbstractMeasureChunkReader.java
new file mode 100644
index 0000000..dc8771f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/measure/AbstractMeasureChunkReader.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.chunk.reader.measure;
+
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.reader.MeasureColumnChunkReader;
+import org.apache.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
+
+/**
+ * Measure block reader abstract class
+ */
+public abstract class AbstractMeasureChunkReader implements MeasureColumnChunkReader {
+
+  /**
+   * metadata which was used to compress and uncompress the measure value
+   */
+  protected ValueCompressionModel compressionModel;
+
+  /**
+   * file path from which blocks will be read
+   */
+  protected String filePath;
+
+  /**
+   * measure chunk have the information about the metadata present in the file
+   */
+  protected List<DataChunk> measureColumnChunk;
+
+  /**
+   * type of value compression model selected for each measure column
+   */
+  protected UnCompressValue[] values;
+
+  /**
+   * Constructor which takes the minimum parameters required to create an instance of this class
+   *
+   * @param measureColumnChunk measure chunk metadata
+   * @param compressionModel   metadata which was used to compress and uncompress
+   *                           the measure value
+   * @param filePath           file from which data will be read
+   * @param isInMemory         in case of an in-memory store it will read and hold the
+   *                           data, and when a query request comes it will uncompress it
+   */
+  public AbstractMeasureChunkReader(List<DataChunk> measureColumnChunk,
+      ValueCompressionModel compressionModel, String filePath, boolean isInMemory) {
+    this.measureColumnChunk = measureColumnChunk;
+    this.compressionModel = compressionModel;
+    this.filePath = filePath;
+    values =
+        new ValueCompressonHolder.UnCompressValue[compressionModel.getUnCompressValues().length];
+    for (int i = 0; i < values.length; i++) {
+      values[i] = compressionModel.getUnCompressValues()[i].getNew().getCompressorObject();
+    }
+  }
+}
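
Note how the constructor clones one uncompressor per measure column from a prototype (getNew()) instead of sharing a single stateful object across columns. Below is a minimal, hypothetical sketch of that prototype pattern; the interface is illustrative and not the project's actual ValueCompressonHolder API:

// Hypothetical prototype interface: each read clones a fresh instance so
// per-column decode state is never shared between columns or threads.
interface UnCompressPrototype {
  UnCompressPrototype getNew();     // clone a fresh, empty instance

  void setValue(byte[] compressed); // attach the raw page bytes

  double[] decode();                // produce the decoded measure values
}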

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/measure/CompressedMeasureChunkFileBasedReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/measure/CompressedMeasureChunkFileBasedReader.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/measure/CompressedMeasureChunkFileBasedReader.java
new file mode 100644
index 0000000..31c470d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/measure/CompressedMeasureChunkFileBasedReader.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.chunk.reader.measure;
+
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
+
+/**
+ * Compressed measure chunk reader
+ */
+public class CompressedMeasureChunkFileBasedReader extends AbstractMeasureChunkReader {
+
+  /**
+   * Constructor which takes the minimum parameters required to create an instance of this class
+   *
+   * @param measureColumnChunk measure chunk metadata
+   * @param compressionModel   metadata which was used to compress and uncompress
+   *                           the measure value
+   * @param filePath           file from which data will be read
+   */
+  public CompressedMeasureChunkFileBasedReader(List<DataChunk> measureColumnChunk,
+      ValueCompressionModel compressionModel, String filePath) {
+    super(measureColumnChunk, compressionModel, filePath, false);
+  }
+
+  /**
+   * Method to read the blocks data based on block indexes
+   *
+   * @param fileReader   file reader to read the blocks
+   * @param blockIndexes blocks to be read
+   * @return measure data chunks
+   */
+  @Override public MeasureColumnDataChunk[] readMeasureChunks(FileHolder fileReader,
+      int... blockIndexes) {
+    MeasureColumnDataChunk[] datChunk = new MeasureColumnDataChunk[values.length];
+    for (int i = 0; i < blockIndexes.length; i++) {
+      datChunk[blockIndexes[i]] = readMeasureChunk(fileReader, blockIndexes[i]);
+    }
+    return datChunk;
+  }
+
+  /**
+   * Method to read the blocks data based on block index
+   *
+   * @param fileReader file reader to read the blocks
+   * @param blockIndex block to be read
+   * @return measure data chunk
+   */
+  @Override public MeasureColumnDataChunk readMeasureChunk(FileHolder fileReader, int blockIndex) {
+    MeasureColumnDataChunk datChunk = new MeasureColumnDataChunk();
+    // create a new uncompressor
+    ValueCompressonHolder.UnCompressValue copy = values[blockIndex].getNew();
+    // read data from file and set to uncompressor
+    copy.setValue(fileReader
+        .readByteArray(filePath, measureColumnChunk.get(blockIndex).getDataPageOffset(),
+            measureColumnChunk.get(blockIndex).getDataPageLength()));
+    // get the data holder after uncompressing
+    CarbonReadDataHolder measureDataHolder =
+        copy.uncompress(compressionModel.getChangedDataType()[blockIndex])
+            .getValues(compressionModel.getDecimal()[blockIndex],
+                compressionModel.getMaxValue()[blockIndex]);
+    // set the data chunk
+    datChunk.setMeasureDataHolder(measureDataHolder);
+    // set the null value indexes
+    datChunk
+        .setNullValueIndexHolder(measureColumnChunk.get(blockIndex).getNullValueIndexForColumn());
+    return datChunk;
+  }
+
+}
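
The decode above is driven entirely by the compression model's per-column metadata (changed data type, decimal count, max value). As a hedged illustration of one scheme such models commonly use, max-delta encoding stores a small delta from the column maximum instead of the full value; decoding could then look like the sketch below, which is not the project's actual codec:

// Hypothetical max-delta decode: each stored byte is a delta from the
// column's max value; the decimal count scales fractional values back down.
public static double[] decodeMaxDelta(byte[] deltas, double maxValue, int decimal) {
  double divisor = Math.pow(10, decimal);
  double[] out = new double[deltas.length];
  for (int i = 0; i < deltas.length; i++) {
    out[i] = (maxValue - deltas[i]) / divisor;
  }
  return out;
}

With maxValue = 100, decimal = 0 and deltas = {0, 5, 30}, this reconstructs {100.0, 95.0, 70.0}.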

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/exception/IndexBuilderException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/exception/IndexBuilderException.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/exception/IndexBuilderException.java
new file mode 100644
index 0000000..a9c7343
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/exception/IndexBuilderException.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.carbon.datastore.exception;
+
+import java.util.Locale;
+
+/**
+ * Exception class for the index builder
+ */
+public class IndexBuilderException extends Exception {
+  /**
+   * default serial version ID.
+   */
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * The Error message.
+   */
+  private String msg = "";
+
+  /**
+   * Constructor
+   *
+   * @param msg The error message for this exception.
+   */
+  public IndexBuilderException(String msg) {
+    super(msg);
+    this.msg = msg;
+  }
+
+  /**
+   * Constructor
+   *
+   * @param msg       exception message
+   * @param throwable detail exception
+   */
+  public IndexBuilderException(String msg, Throwable throwable) {
+    super(msg, throwable);
+    this.msg = msg;
+  }
+
+  /**
+   * Constructor
+   *
+   * @param throwable exception
+   */
+  public IndexBuilderException(Throwable throwable) {
+    super(throwable);
+  }
+
+  /**
+   * This method is used to get the localized message.
+   *
+   * @param locale - A Locale object represents a specific geographical,
+   *               political, or cultural region.
+   * @return - Localized error message.
+   */
+  public String getLocalizedMessage(Locale locale) {
+    return "";
+  }
+
+  /**
+   * getLocalizedMessage
+   */
+  @Override public String getLocalizedMessage() {
+    return super.getLocalizedMessage();
+  }
+
+  /**
+   * getMessage
+   */
+  public String getMessage() {
+    return this.msg;
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeBuilder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeBuilder.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeBuilder.java
new file mode 100644
index 0000000..d414d3e
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeBuilder.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.impl.btree;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.BtreeBuilder;
+import org.apache.carbondata.core.carbon.datastore.IndexKey;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.util.CarbonProperties;
+
+/**
+ * Abstract Btree based builder
+ */
+public abstract class AbstractBTreeBuilder implements BtreeBuilder {
+
+  /**
+   * default number of entries in a non-leaf node
+   */
+  private static final int DEFAULT_NUMBER_OF_ENTRIES_NONLEAF = 32;
+
+  /**
+   * Maximum number of entries in intermediate nodes
+   */
+  protected int maxNumberOfEntriesInNonLeafNodes;
+
+  /**
+   * Number of leaf nodes
+   */
+  protected int nLeaf;
+
+  /**
+   * root node of a btree
+   */
+  protected BTreeNode root;
+
+  public AbstractBTreeBuilder() {
+    maxNumberOfEntriesInNonLeafNodes = Integer.parseInt(CarbonProperties.getInstance()
+        .getProperty("com.huawei.datastore.internalnodesize",
+            DEFAULT_NUMBER_OF_ENTRIES_NONLEAF + ""));
+  }
+
+  /**
+   * Below method is to build the intermediate node of the btree
+   *
+   * @param curNode              current node
+   * @param childNodeGroups      children group which will have all the children for
+   *                             particular intermediate node
+   * @param currentGroup         current group
+   * @param interNSKeyList       list of keys
+   * @param numberOfInternalNode number of internal node
+   */
+  protected void addIntermediateNode(BTreeNode curNode, List<BTreeNode[]> childNodeGroups,
+      BTreeNode[] currentGroup, List<List<IndexKey>> interNSKeyList, int numberOfInternalNode) {
+
+    int groupCounter;
+    // Build internal nodes level by level. Each upper node can have
+    // upperMaxEntry keys and upperMaxEntry+1 children
+    int remainder;
+    int nHigh = numberOfInternalNode;
+    boolean bRootBuilt = false;
+    remainder = nLeaf % (maxNumberOfEntriesInNonLeafNodes);
+    List<IndexKey> interNSKeys = null;
+    while (nHigh > 1 || !bRootBuilt) {
+      List<BTreeNode[]> internalNodeGroups =
+          new ArrayList<BTreeNode[]>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+      List<List<IndexKey>> interNSKeyTmpList =
+          new ArrayList<List<IndexKey>>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+      numberOfInternalNode = 0;
+      for (int i = 0; i < nHigh; i++) {
+        // Create a new internal node
+        curNode = new BTreeNonLeafNode();
+        // Allocate a new node group if current node group is full
+        groupCounter = i % (maxNumberOfEntriesInNonLeafNodes);
+        if (groupCounter == 0) {
+          // Create new node group
+          currentGroup = new BTreeNonLeafNode[maxNumberOfEntriesInNonLeafNodes];
+          internalNodeGroups.add(currentGroup);
+          numberOfInternalNode++;
+          interNSKeys = new ArrayList<IndexKey>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+          interNSKeyTmpList.add(interNSKeys);
+        }
+
+        // Add the new internal node to current group
+        if (null != currentGroup) {
+          currentGroup[groupCounter] = curNode;
+        }
+        int nNodes;
+
+        if (i == nHigh - 1 && remainder != 0) {
+          nNodes = remainder;
+        } else {
+          nNodes = maxNumberOfEntriesInNonLeafNodes;
+        }
+        // Point the internal node to its children node group
+        curNode.setChildren(childNodeGroups.get(i));
+        // Fill the internal node with keys based on its child nodes
+        for (int j = 0; j < nNodes; j++) {
+          curNode.setKey(interNSKeyList.get(i).get(j));
+          if (j == 0 && null != interNSKeys) {
+            interNSKeys.add(interNSKeyList.get(i).get(j));
+
+          }
+        }
+      }
+      // If nHigh is 1, we have the root node
+      if (nHigh == 1) {
+        bRootBuilt = true;
+      }
+
+      remainder = nHigh % (maxNumberOfEntriesInNonLeafNodes);
+      nHigh = numberOfInternalNode;
+      childNodeGroups = internalNodeGroups;
+      interNSKeyList = interNSKeyTmpList;
+    }
+    root = curNode;
+  }
+
+  /**
+   * Below method is to convert the start key
+   * into a fixed length and a variable length key.
+   * data format: <length><fixed length key><length><variable length key>
+   *
+   * @param startKey
+   * @return Index key
+   */
+  protected IndexKey convertStartKeyToNodeEntry(byte[] startKey) {
+    ByteBuffer buffer = ByteBuffer.wrap(startKey);
+    buffer.rewind();
+    int dictonaryKeySize = buffer.getInt();
+    int nonDictonaryKeySize = buffer.getInt();
+    byte[] dictionaryKey = new byte[dictonaryKeySize];
+    buffer.get(dictionaryKey);
+    byte[] nonDictionaryKey = new byte[nonDictonaryKeySize];
+    buffer.get(nonDictionaryKey);
+    IndexKey entry = new IndexKey(dictionaryKey, nonDictionaryKey);
+    return entry;
+  }
+
+  /**
+   * Below method will be used to get the first data block
+   * in case of a btree it will be the root node
+   */
+  @Override public BTreeNode get() {
+    return root;
+  }
+}
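
A worked example of the level-by-level construction above: with the default fan-out of 32, 1000 leaf nodes collapse into ceil(1000/32) = 32 internal nodes, which then collapse into a single root, at which point nHigh reaches 1 and the loop stops. The sketch below (illustrative, not part of the patch) computes the node count per level:

// Prints how many nodes each btree level contains, building bottom-up.
public static void printLevels(int leafCount, int fanOut) {
  int nodes = leafCount;
  int level = 0;
  while (nodes > 1) {
    nodes = (nodes + fanOut - 1) / fanOut;  // ceiling division
    level++;
    System.out.println("level " + level + ": " + nodes + " node(s)");
  }
}
// printLevels(1000, 32) prints "level 1: 32 node(s)" and "level 2: 1 node(s)".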

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeLeafNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeLeafNode.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeLeafNode.java
new file mode 100644
index 0000000..de476ad
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeLeafNode.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.impl.btree;
+
+import org.apache.carbondata.core.carbon.datastore.DataRefNode;
+import org.apache.carbondata.core.carbon.datastore.IndexKey;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+/**
+ * Leaf node abstract class
+ */
+public abstract class AbstractBTreeLeafNode implements BTreeNode {
+
+  /**
+   * number of keys in this leaf node
+   */
+  protected int numberOfKeys;
+
+  /**
+   * node number
+   */
+  protected long nodeNumber;
+
+  /**
+   * Next node of the leaf
+   */
+  protected BTreeNode nextNode;
+
+  /**
+   * max key of each column; this will be used to check whether this leaf
+   * will be used for scanning or not
+   */
+  protected byte[][] maxKeyOfColumns;
+
+  /**
+   * min key of each column; this will be used to check whether this leaf
+   * will be used for scanning or not
+   */
+  protected byte[][] minKeyOfColumns;
+
+  /**
+   * Method to get the number of keys present in this node
+   *
+   * @return number of keys
+   */
+  @Override public int nodeSize() {
+    return this.numberOfKeys;
+  }
+
+  /**
+   * below method will be used to set the next node
+   *
+   * @param nextNode
+   */
+  @Override public void setNextNode(BTreeNode nextNode) {
+    this.nextNode = nextNode;
+  }
+
+  /**
+   * Below method is to get the children based on index
+   *
+   * @param index children index
+   * @return btree node
+   */
+  @Override public BTreeNode getChild(int index) {
+    throw new UnsupportedOperationException("Operation not supported in case of leaf node");
+  }
+
+  /**
+   * below method to set the node entry
+   *
+   * @param key node entry
+   */
+  @Override public void setKey(IndexKey key) {
+    throw new UnsupportedOperationException("Operation not supported in case of leaf node");
+  }
+
+  /**
+   * Method can be used to get the block index. This can be used when multiple
+   * threads are used to scan a group of blocks; in that case we can assign
+   * some of the blocks to one thread and some to another
+   *
+   * @return block number
+   */
+  @Override public long nodeNumber() {
+    return nodeNumber;
+  }
+
+  /**
+   * This method will be used to get the max value of all the columns;
+   * this can be used in case of a filter query
+   *
+   * @return max value of all the columns
+   */
+  @Override public byte[][] getColumnsMaxValue() {
+    return maxKeyOfColumns;
+  }
+
+  /**
+   * This method will be used to get the min value of all the columns;
+   * this can be used in case of a filter query
+   *
+   * @return min value of all the columns
+   */
+  @Override public byte[][] getColumnsMinValue() {
+    return minKeyOfColumns;
+  }
+
+  /**
+   * to check whether node in a btree is a leaf node or not
+   *
+   * @return leaf node or not
+   */
+  @Override public boolean isLeafNode() {
+    return true;
+  }
+
+  /**
+   * Method to get the next block; this can be used while scanning, when
+   * an iterator of this class is used to iterate over blocks
+   *
+   * @return next block
+   */
+  @Override public DataRefNode getNextDataRefNode() {
+    return nextNode;
+  }
+
+  /**
+   * below method will return the index key entries of this node
+   *
+   * @return node entry array
+   */
+  @Override public IndexKey[] getNodeKeys() {
+    // as this is a leaf node, this method implementation is not required
+    throw new UnsupportedOperationException("Operation not supported in case of leaf node");
+  }
+
+  /**
+   * below method will be used to set the children of intermediate node
+   *
+   * @param children array
+   */
+  @Override public void setChildren(BTreeNode[] children) {
+    // not required in case of a leaf node, as a leaf node will not have any children
+    throw new UnsupportedOperationException("Operation not supported in case of leaf node");
+  }
+
+  /**
+   * Below method will be used to get the dimension chunks
+   *
+   * @param fileReader   file reader to read the chunks from file
+   * @param blockIndexes indexes of the blocks need to be read
+   * @return dimension data chunks
+   */
+  @Override public DimensionColumnDataChunk[] getDimensionChunks(FileHolder fileReader,
+      int[] blockIndexes) {
+    // Not required here, as the leaf implementation extending this class will
+    // implement its own method to get dimension chunks
+    return null;
+  }
+
+  /**
+   * Below method will be used to get the dimension chunk
+   *
+   * @param fileReader file reader to read the chunk from file
+   * @param blockIndex block index to be read
+   * @return dimension data chunk
+   */
+  @Override public DimensionColumnDataChunk getDimensionChunk(FileHolder fileReader,
+      int blockIndex) {
+    // Not required here, as the leaf implementation extending this class will
+    // implement its own method to get dimension chunks
+    return null;
+  }
+
+  /**
+   * Below method will be used to get the measure chunks
+   *
+   * @param fileReader   file reader to read the chunks from file
+   * @param blockIndexes block indexes to be read from file
+   * @return measure column data chunks
+   */
+  @Override public MeasureColumnDataChunk[] getMeasureChunks(FileHolder fileReader,
+      int[] blockIndexes) {
+    // Not required here, as the leaf implementation extending this class will
+    // implement its own method to get measure chunks
+    return null;
+  }
+
+  /**
+   * Below method will be used to read the measure chunk
+   *
+   * @param fileReader file reader to read the chunk from file
+   * @param blockIndex block index to be read from file
+   * @return measure data chunk
+   */
+  @Override public MeasureColumnDataChunk getMeasureChunk(FileHolder fileReader, int blockIndex) {
+    // Not required here, as the leaf implementation extending this class will
+    // implement its own method to get measure chunks
+    return null;
+  }
+}
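
The min/max key arrays held by a leaf enable block pruning during filter queries: a leaf can only contain matches for a key that lies within [min, max] of the filtered column. Below is a self-contained sketch of that check, using a plain unsigned byte comparison in place of the project's ByteUtil comparator; names and values are illustrative:

// Hypothetical min/max pruning check for one column of a leaf node.
public final class MinMaxPruneDemo {

  // Lexicographic unsigned comparison of two byte arrays.
  static int compareBytes(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int diff = (a[i] & 0xff) - (b[i] & 0xff);
      if (diff != 0) {
        return diff;
      }
    }
    return a.length - b.length;
  }

  // A leaf may contain matches for key only if min <= key <= max.
  static boolean mightContain(byte[] min, byte[] max, byte[] key) {
    return compareBytes(min, key) <= 0 && compareBytes(key, max) <= 0;
  }

  public static void main(String[] args) {
    byte[] min = { 10 };
    byte[] max = { 40 };
    System.out.println(mightContain(min, max, new byte[] { 25 }));  // true
    System.out.println(mightContain(min, max, new byte[] { 99 }));  // false, skip this leaf
  }
}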

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeDataRefNodeFinder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeDataRefNodeFinder.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeDataRefNodeFinder.java
new file mode 100644
index 0000000..e443182
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeDataRefNodeFinder.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.impl.btree;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.core.carbon.datastore.DataRefNode;
+import org.apache.carbondata.core.carbon.datastore.DataRefNodeFinder;
+import org.apache.carbondata.core.carbon.datastore.IndexKey;
+import org.apache.carbondata.core.util.ByteUtil;
+
+/**
+ * Below class will be used to find a block in a btree
+ */
+public class BTreeDataRefNodeFinder implements DataRefNodeFinder {
+
+  /**
+   * no dictionary column values are of variable length, so in the
+   * each-column-value-size array the entry will be -1
+   */
+  private static final int NO_DCITIONARY_COLUMN_VALUE = -1;
+
+  /**
+   * size of a short value in bytes
+   */
+  private static final short SHORT_SIZE_IN_BYTES = 2;
+  /**
+   * this holds the size of each column value; it is used while comparing
+   * the btree node value with the search value. If the size is more than
+   * zero it is a fixed length column, else it is a variable length column.
+   * As the data of both column types is stored separately, this value size
+   * array is used for both purposes: comparison and jumping (to know which
+   * type of value needs to be compared)
+   */
+  private int[] eachColumnValueSize;
+
+  /**
+   * this will be used during search for no dictionary column
+   */
+  private int numberOfNoDictionaryColumns;
+
+  public BTreeDataRefNodeFinder(int[] eachColumnValueSize) {
+    this.eachColumnValueSize = eachColumnValueSize;
+
+    for (int i = 0; i < eachColumnValueSize.length; i++) {
+      if (eachColumnValueSize[i] == -1) {
+        numberOfNoDictionaryColumns++;
+      }
+    }
+  }
+
+  /**
+   * Below method will be used to get the first tentative data block based on
+   * search key
+   *
+   * @param dataRefBlock complete data blocks present
+   * @param searchKey    key to be searched
+   * @return data block
+   */
+  @Override public DataRefNode findFirstDataBlock(DataRefNode dataRefBlock, IndexKey searchKey) {
+    // as its for btree type cast it to btree interface
+    BTreeNode rootNode = (BTreeNode) dataRefBlock;
+    while (!rootNode.isLeafNode()) {
+      rootNode = findFirstLeafNode(searchKey, rootNode);
+    }
+    return rootNode;
+  }
+
+  /**
+   * Below method will be used to get the last data tentative block based on
+   * search key
+   *
+   * @param dataRefBlock complete data blocks present
+   * @param searchKey    key to be searched
+   * @return data block
+   */
+  @Override public DataRefNode findLastDataBlock(DataRefNode dataRefBlock, IndexKey searchKey) {
+    // as its for btree type cast it to btree interface
+    BTreeNode rootNode = (BTreeNode) dataRefBlock;
+    while (!rootNode.isLeafNode()) {
+      rootNode = findLastLeafNode(searchKey, rootNode);
+    }
+    return rootNode;
+  }
+
+  /**
+   * Binary search used to get the first tentative block of the btree based on
+   * search key
+   *
+   * @param key  search key
+   * @param node root node of btree
+   * @return first tentative block
+   */
+  private BTreeNode findFirstLeafNode(IndexKey key, BTreeNode node) {
+    int childNodeIndex;
+    int low = 0;
+    int high = node.nodeSize() - 1;
+    int mid = 0;
+    int compareRes = -1;
+    IndexKey[] nodeKeys = node.getNodeKeys();
+    //
+    while (low <= high) {
+      mid = (low + high) >>> 1;
+      // compare the entries
+      compareRes = compareIndexes(key, nodeKeys[mid]);
+      if (compareRes < 0) {
+        high = mid - 1;
+      } else if (compareRes > 0) {
+        low = mid + 1;
+      } else {
+        // if key is matched then get the first entry
+        int currentPos = mid;
+        while (currentPos - 1 >= 0 && compareIndexes(key, nodeKeys[currentPos - 1]) == 0) {
+          currentPos--;
+        }
+        mid = currentPos;
+        break;
+      }
+    }
+    // if the compare result is less than zero and mid is more than 0,
+    // then we need to go to the previous block, as duplicate
+    // records can be present
+    if (compareRes < 0) {
+      if (mid > 0) {
+        mid--;
+      }
+      childNodeIndex = mid;
+    } else {
+      childNodeIndex = mid;
+    }
+    // get the leaf child
+    node = node.getChild(childNodeIndex);
+    return node;
+  }
+
+  /**
+   * Binary search used to get the last tentative block of the btree based on
+   * search key
+   *
+   * @param key  search key
+   * @param node root node of btree
+   * @return last tentative block
+   */
+  private BTreeNode findLastLeafNode(IndexKey key, BTreeNode node) {
+    int childNodeIndex;
+    int low = 0;
+    int high = node.nodeSize() - 1;
+    int mid = 0;
+    int compareRes = -1;
+    IndexKey[] nodeKeys = node.getNodeKeys();
+    //
+    while (low <= high) {
+      mid = (low + high) >>> 1;
+      // compare the entries
+      compareRes = compareIndexes(key, nodeKeys[mid]);
+      if (compareRes < 0) {
+        high = mid - 1;
+      } else if (compareRes > 0) {
+        low = mid + 1;
+      } else {
+        int currentPos = mid;
+        // if key is matched then get the last entry
+        while (currentPos + 1 < node.nodeSize()
+            && compareIndexes(key, nodeKeys[currentPos + 1]) == 0) {
+          currentPos++;
+        }
+        mid = currentPos;
+        break;
+      }
+    }
+    // if the compare result is less than zero and mid is more than 0,
+    // then we need to go to the previous block, as duplicate
+    // records can be present
+    if (compareRes < 0) {
+      if (mid > 0) {
+        mid--;
+      }
+      childNodeIndex = mid;
+    } else {
+      childNodeIndex = mid;
+    }
+    node = node.getChild(childNodeIndex);
+    return node;
+  }
+
+  /**
+   * Comparison of index keys. The key format is as follows: the dictionary
+   * key is a plain byte array, and the no dictionary key is laid out as
+   * <Index of FirstKey (2 bytes)><Index of SecondKey (2 bytes)>...
+   * <Index of NKey (2 bytes)><First Key ByteArray><2nd Key ByteArray>...
+   * <N Key ByteArray>. In the each-column-value-size array the size of a
+   * no dictionary column will be -1. If the size is not -1 then compare
+   * the byte arrays based on that size and increment the offset by the
+   * dictionary column size. If the size is -1 it is a no dictionary key,
+   * so to get the length subtract the current key offset from the next key
+   * offset; if it is at the last position or only one key is present then
+   * subtract from the total length
+   *
+   * @param first  key
+   * @param second key
+   * @return comparison value
+   */
+  private int compareIndexes(IndexKey first, IndexKey second) {
+    int dictionaryKeyOffset = 0;
+    int nonDictionaryKeyOffset = 0;
+    int compareResult = 0;
+    int processedNoDictionaryColumn = numberOfNoDictionaryColumns;
+    ByteBuffer firstNoDictionaryKeyBuffer = ByteBuffer.wrap(first.getNoDictionaryKeys());
+    ByteBuffer secondNoDictionaryKeyBuffer = ByteBuffer.wrap(second.getNoDictionaryKeys());
+    int actualOffset = 0;
+    int firstNoDcitionaryLength = 0;
+    int secondNodeDictionaryLength = 0;
+
+    for (int i = 0; i < eachColumnValueSize.length; i++) {
+
+      if (eachColumnValueSize[i] != NO_DCITIONARY_COLUMN_VALUE) {
+        compareResult = ByteUtil.UnsafeComparer.INSTANCE
+            .compareTo(first.getDictionaryKeys(), dictionaryKeyOffset, eachColumnValueSize[i],
+                second.getDictionaryKeys(), dictionaryKeyOffset, eachColumnValueSize[i]);
+        dictionaryKeyOffset += eachColumnValueSize[i];
+      } else {
+        if (processedNoDictionaryColumn > 1) {
+          actualOffset = firstNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset);
+          firstNoDcitionaryLength =
+              firstNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset + SHORT_SIZE_IN_BYTES);
+          secondNodeDictionaryLength =
+              secondNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset + SHORT_SIZE_IN_BYTES);
+          compareResult = ByteUtil.UnsafeComparer.INSTANCE
+              .compareTo(first.getNoDictionaryKeys(), actualOffset, firstNoDcitionaryLength,
+                  second.getNoDictionaryKeys(), actualOffset, secondNodeDictionaryLength);
+          nonDictionaryKeyOffset += SHORT_SIZE_IN_BYTES;
+          processedNoDictionaryColumn--;
+        } else {
+          actualOffset = firstNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset);
+          firstNoDcitionaryLength = first.getNoDictionaryKeys().length - actualOffset;
+          secondNodeDictionaryLength = second.getNoDictionaryKeys().length - actualOffset;
+          compareResult = ByteUtil.UnsafeComparer.INSTANCE
+              .compareTo(first.getNoDictionaryKeys(), actualOffset, firstNoDcitionaryLength,
+                  second.getNoDictionaryKeys(), actualOffset, secondNodeDictionaryLength);
+        }
+      }
+      if (compareResult != 0) {
+        return compareResult;
+      }
+    }
+
+    return 0;
+  }
+}
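
findFirstLeafNode and findLastLeafNode above are the leftmost- and rightmost-match variants of binary search: after hitting any equal key they walk to the first (or last) duplicate, and on a miss they step back one child so earlier duplicates are not skipped. The same leftmost idea on a plain int array, as a compact reference (illustrative only):

// Leftmost-match binary search on a sorted int array; returns the index of
// the first element equal to key, or -1 if the key is absent.
public static int findFirst(int[] sorted, int key) {
  int low = 0;
  int high = sorted.length - 1;
  int result = -1;
  while (low <= high) {
    int mid = (low + high) >>> 1;
    if (sorted[mid] == key) {
      result = mid;       // remember the match, keep searching to the left
      high = mid - 1;
    } else if (sorted[mid] < key) {
      low = mid + 1;
    } else {
      high = mid - 1;
    }
  }
  return result;
}
// findFirst(new int[] { 1, 2, 2, 2, 3 }, 2) returns 1 (the first 2), not 2 or 3.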

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeNode.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeNode.java
new file mode 100644
index 0000000..6b624c6
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeNode.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.impl.btree;
+
+import org.apache.carbondata.core.carbon.datastore.DataRefNode;
+import org.apache.carbondata.core.carbon.datastore.IndexKey;
+
+/**
+ * Interface for btree node
+ */
+public interface BTreeNode extends DataRefNode {
+
+  /**
+   * below method will return the index key entries of this node
+   *
+   * @return node entry array
+   */
+  IndexKey[] getNodeKeys();
+
+  /**
+   * to check whether node in a btree is a leaf node or not
+   *
+   * @return leaf node or not
+   */
+  boolean isLeafNode();
+
+  /**
+   * below method will be used to set the children of intermediate node
+   *
+   * @param children array
+   */
+  void setChildren(BTreeNode[] children);
+
+  /**
+   * below method will be used to set the next node
+   *
+   * @param nextNode
+   */
+  void setNextNode(BTreeNode nextNode);
+
+  /**
+   * Below method is to get the children based on index
+   *
+   * @param index children index
+   * @return btree node
+   */
+  BTreeNode getChild(int index);
+
+  /**
+   * below method is used to set the node entry
+   *
+   * @param key node entry
+   */
+  void setKey(IndexKey key);
+}


[23/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/AbsoluteTableIdentifier.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/AbsoluteTableIdentifier.java b/core/src/main/java/org/carbondata/core/carbon/AbsoluteTableIdentifier.java
deleted file mode 100644
index 7234a82..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/AbsoluteTableIdentifier.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon;
-
-import java.io.Serializable;
-
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-
-/**
- * identifier which will have store path and carbon table identifier
- */
-public class AbsoluteTableIdentifier implements Serializable {
-
-  /**
-   * serializable version
-   */
-  private static final long serialVersionUID = 4695047103484427506L;
-
-  /**
-   * path of the store
-   */
-  private String storePath;
-
-  /**
-   * carbon table identifier which will have table name and table database
-   * name
-   */
-  private CarbonTableIdentifier carbonTableIdentifier;
-
-  public AbsoluteTableIdentifier(String storePath, CarbonTableIdentifier carbonTableIdentifier) {
-    //TODO this should be moved to common place where path handling will be handled
-    this.storePath = FileFactory.getUpdatedFilePath(storePath);
-    this.carbonTableIdentifier = carbonTableIdentifier;
-  }
-
-  /**
-   * @return the storePath
-   */
-  public String getStorePath() {
-    return storePath;
-  }
-
-  /**
-   * @return the carbonTableIdentifier
-   */
-  public CarbonTableIdentifier getCarbonTableIdentifier() {
-    return carbonTableIdentifier;
-  }
-
-  /**
-   * to get the hash code
-   */
-  @Override public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result =
-        prime * result + ((carbonTableIdentifier == null) ? 0 : carbonTableIdentifier.hashCode());
-    result = prime * result + ((storePath == null) ? 0 : storePath.hashCode());
-    return result;
-  }
-
-  /**
-   * to check whether this object is equal to
-   * the other object passed
-   *
-   * @param obj other object
-   */
-  @Override public boolean equals(Object obj) {
-    if (this == obj) {
-      return true;
-    }
-    if (obj == null) {
-      return false;
-    }
-    if (!(obj instanceof AbsoluteTableIdentifier)) {
-      return false;
-    }
-    AbsoluteTableIdentifier other = (AbsoluteTableIdentifier) obj;
-    if (carbonTableIdentifier == null) {
-      if (other.carbonTableIdentifier != null) {
-        return false;
-      }
-    } else if (!carbonTableIdentifier.equals(other.carbonTableIdentifier)) {
-      return false;
-    }
-    if (storePath == null) {
-      if (other.storePath != null) {
-        return false;
-      }
-    } else if (!storePath.equals(other.storePath)) {
-      return false;
-    }
-    return true;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/CarbonDataLoadSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/CarbonDataLoadSchema.java b/core/src/main/java/org/carbondata/core/carbon/CarbonDataLoadSchema.java
deleted file mode 100644
index f23f600..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/CarbonDataLoadSchema.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.carbon.metadata.schema.table.CarbonTable;
-
-/**
- * Wrapper Data Load Schema object which will be used to
- * support relation while data loading
- */
-public class CarbonDataLoadSchema implements Serializable {
-
-  /**
-   * default serializer
-   */
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * CarbonTable info
-   */
-  private CarbonTable carbonTable;
-
-  /**
-   * dimension table and relation info
-   */
-  private List<DimensionRelation> dimensionRelationList;
-
-  /**
-   * CarbonDataLoadSchema constructor which takes CarbonTable
-   *
-   * @param carbonTable
-   */
-  public CarbonDataLoadSchema(CarbonTable carbonTable) {
-    this.carbonTable = carbonTable;
-    this.dimensionRelationList = new ArrayList<DimensionRelation>();
-  }
-
-  /**
-   * get dimension relation list
-   *
-   * @return dimensionRelationList
-   */
-  public List<DimensionRelation> getDimensionRelationList() {
-    return dimensionRelationList;
-  }
-
-  /**
-   * set dimension relation list
-   *
-   * @param dimensionRelationList
-   */
-  public void setDimensionRelationList(List<DimensionRelation> dimensionRelationList) {
-    this.dimensionRelationList = dimensionRelationList;
-  }
-
-  /**
-   * get carbontable
-   *
-   * @return carbonTable
-   */
-  public CarbonTable getCarbonTable() {
-    return carbonTable;
-  }
-
-  /**
-   * Dimension Relation object which will be filled from
-   * Load DML Command to support normalized table data load
-   */
-  public static class DimensionRelation implements Serializable {
-    /**
-     * default serializer
-     */
-    private static final long serialVersionUID = 1L;
-
-    /**
-     * dimension tableName
-     */
-    private String tableName;
-
-    /**
-     * dimensionSource csv path
-     */
-    private String dimensionSource;
-
-    /**
-     * relation with fact and dimension table
-     */
-    private Relation relation;
-
-    /**
-     * Columns to selected from dimension table.
-     * Hierarchy in-memory table should be prepared
-     * based on selected columns
-     */
-    private List<String> columns;
-
-    /**
-     * constructor
-     *
-     * @param tableName       - dimension table name
-     * @param dimensionSource - source file path
-     * @param relation        - fact foreign key with dimension primary key mapping
-     * @param columns         - list of columns to be used from this dimension table
-     */
-    public DimensionRelation(String tableName, String dimensionSource, Relation relation,
-        List<String> columns) {
-      this.tableName = tableName;
-      this.dimensionSource = dimensionSource;
-      this.relation = relation;
-      this.columns = columns;
-    }
-
-    /**
-     * @return tableName
-     */
-    public String getTableName() {
-      return tableName;
-    }
-
-    /**
-     * @return dimensionSource
-     */
-    public String getDimensionSource() {
-      return dimensionSource;
-    }
-
-    /**
-     * @return relation
-     */
-    public Relation getRelation() {
-      return relation;
-    }
-
-    /**
-     * @return columns
-     */
-    public List<String> getColumns() {
-      return columns;
-    }
-  }
-
-  /**
-   * Relation class to specify fact foreignkey column with
-   * dimension primary key column
-   */
-  public static class Relation implements Serializable {
-    /**
-     * default serializer
-     */
-    private static final long serialVersionUID = 1L;
-
-    /**
-     * Fact foreign key column
-     */
-    private String factForeignKeyColumn;
-
-    /**
-     * dimension primary key column
-     */
-    private String dimensionPrimaryKeyColumn;
-
-    /**
-     * constructor
-     *
-     * @param factForeignKeyColumn      - Fact Table Foreign key
-     * @param dimensionPrimaryKeyColumn - Dimension Table primary key
-     */
-    public Relation(String factForeignKeyColumn, String dimensionPrimaryKeyColumn) {
-      this.factForeignKeyColumn = factForeignKeyColumn;
-      this.dimensionPrimaryKeyColumn = dimensionPrimaryKeyColumn;
-    }
-
-    /**
-     * @return factForeignKeyColumn
-     */
-    public String getFactForeignKeyColumn() {
-      return factForeignKeyColumn;
-    }
-
-    /**
-     * @return dimensionPrimaryKeyColumn
-     */
-    public String getDimensionPrimaryKeyColumn() {
-      return dimensionPrimaryKeyColumn;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/CarbonTableIdentifier.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/CarbonTableIdentifier.java b/core/src/main/java/org/carbondata/core/carbon/CarbonTableIdentifier.java
deleted file mode 100644
index 949522c..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/CarbonTableIdentifier.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.carbon;
-
-import java.io.Serializable;
-
-/**
- * Identifier class which will hold the table qualified name
- */
-public class CarbonTableIdentifier implements Serializable {
-
-  /**
-   * database name
-   */
-  private String databaseName;
-
-  /**
-   * table name
-   */
-  private String tableName;
-
-  /**
-   * table id
-   */
-  private String tableId;
-
-  /**
-   * constructor
-   */
-  public CarbonTableIdentifier(String databaseName, String tableName, String tableId) {
-    this.databaseName = databaseName;
-    this.tableName = tableName;
-    this.tableId = tableId;
-  }
-
-  /**
-   * return database name
-   */
-  public String getDatabaseName() {
-    return databaseName;
-  }
-
-  /**
-   * return table name
-   */
-  public String getTableName() {
-    return tableName;
-  }
-
-  /**
-   * @return tableId
-   */
-  public String getTableId() {
-    return tableId;
-  }
-
-  /**
-   * @return table unique name
-   */
-  public String getTableUniqueName() {
-    return databaseName + '_' + tableName;
-  }
-
-  @Override public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + ((databaseName == null) ? 0 : databaseName.hashCode());
-    result = prime * result + ((tableId == null) ? 0 : tableId.hashCode());
-    result = prime * result + ((tableName == null) ? 0 : tableName.hashCode());
-    return result;
-  }
-
-  @Override public boolean equals(Object obj) {
-    if (this == obj) {
-      return true;
-    }
-    if (obj == null) {
-      return false;
-    }
-    if (getClass() != obj.getClass()) {
-      return false;
-    }
-    CarbonTableIdentifier other = (CarbonTableIdentifier) obj;
-    if (databaseName == null) {
-      if (other.databaseName != null) {
-        return false;
-      }
-    } else if (!databaseName.equals(other.databaseName)) {
-      return false;
-    }
-    if (tableId == null) {
-      if (other.tableId != null) {
-        return false;
-      }
-    } else if (!tableId.equals(other.tableId)) {
-      return false;
-    }
-    if (tableName == null) {
-      if (other.tableName != null) {
-        return false;
-      }
-    } else if (!tableName.equals(other.tableName)) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * @return table unique name
-   */
-  @Override public String toString() {
-    return databaseName + '_' + tableName;
-  }
-}
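
Since equals() and hashCode() above are derived from databaseName, tableName
and tableId, identifier instances can serve directly as map keys. A minimal
usage sketch, assuming the class above is on the classpath; the database,
table and id values are hypothetical:

    import java.util.HashMap;
    import java.util.Map;

    import org.carbondata.core.carbon.CarbonTableIdentifier;

    public class IdentifierUsageSketch {
      public static void main(String[] args) {
        // two identifiers carrying the same database name, table name and id
        CarbonTableIdentifier first = new CarbonTableIdentifier("default", "sales", "1");
        CarbonTableIdentifier second = new CarbonTableIdentifier("default", "sales", "1");

        Map<CarbonTableIdentifier, String> tablePaths =
            new HashMap<CarbonTableIdentifier, String>();
        tablePaths.put(first, "/store/default/sales");  // hypothetical store path

        // equals()/hashCode() resolve the second instance to the same entry
        System.out.println(tablePaths.get(second));      // /store/default/sales
        System.out.println(first.getTableUniqueName());  // default_sales
      }
    }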

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/ColumnIdentifier.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/ColumnIdentifier.java b/core/src/main/java/org/carbondata/core/carbon/ColumnIdentifier.java
deleted file mode 100644
index 74d8b4d..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/ColumnIdentifier.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon;
-
-import java.io.Serializable;
-import java.util.Map;
-
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-
-/**
- * Column unique identifier
- */
-public class ColumnIdentifier implements Serializable {
-
-  /**
-   *
-   */
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * column id
-   */
-  private String columnId;
-
-  /**
-   * column properties
-   */
-  private Map<String, String> columnProperties;
-
-  private DataType dataType;
-
-  /**
-   * @param columnId
-   * @param columnProperties
-   * @param dataType
-   */
-  public ColumnIdentifier(String columnId, Map<String, String> columnProperties,
-      DataType dataType) {
-    this.columnId = columnId;
-    this.columnProperties = columnProperties;
-    this.dataType = dataType;
-  }
-
-  /**
-   * @return columnId
-   */
-  public String getColumnId() {
-    return columnId;
-  }
-
-  /**
-   * @param columnProperty property name to look up
-   * @return the property value if present, otherwise null
-   */
-  public String getColumnProperty(String columnProperty) {
-    if (null != columnProperties) {
-      return columnProperties.get(columnProperty);
-    }
-    return null;
-  }
-
-  public DataType getDataType() {
-    return this.dataType;
-  }
-
-  @Override public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + ((columnId == null) ? 0 : columnId.hashCode());
-    return result;
-  }
-
-  @Override public boolean equals(Object obj) {
-    if (this == obj) {
-      return true;
-    }
-    if (obj == null) {
-      return false;
-    }
-    if (getClass() != obj.getClass()) {
-      return false;
-    }
-    ColumnIdentifier other = (ColumnIdentifier) obj;
-    if (columnId == null) {
-      if (other.columnId != null) {
-        return false;
-      }
-    } else if (!columnId.equals(other.columnId)) {
-      return false;
-    }
-    return true;
-  }
-
-  @Override public String toString() {
-    return "ColumnIdentifier [columnId=" + columnId + "]";
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/BTreeBuilderInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/BTreeBuilderInfo.java b/core/src/main/java/org/carbondata/core/carbon/datastore/BTreeBuilderInfo.java
deleted file mode 100644
index 338d80e..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/BTreeBuilderInfo.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore;
-
-import java.util.List;
-
-import org.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
-
-/**
- * Below class holds the metadata required to build the blocks
- */
-public class BTreeBuilderInfo {
-
-  /**
-   * holds all the information about data
-   * file meta data
-   */
-  private List<DataFileFooter> footerList;
-
-  /**
-   * size of each dimension column value,
-   * which will be useful for reading
-   */
-  private int[] dimensionColumnValueSize;
-
-  public BTreeBuilderInfo(List<DataFileFooter> footerList,
-      int[] dimensionColumnValueSize) {
-    this.dimensionColumnValueSize = dimensionColumnValueSize;
-    this.footerList = footerList;
-  }
-
-  /**
-   * @return the dimensionColumnValueSize
-   */
-  public int[] getDimensionColumnValueSize() {
-    return dimensionColumnValueSize;
-  }
-
-  /**
-   * @return the footerList
-   */
-  public List<DataFileFooter> getFooterList() {
-    return footerList;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/BlockIndexStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/BlockIndexStore.java b/core/src/main/java/org/carbondata/core/carbon/datastore/BlockIndexStore.java
deleted file mode 100644
index 7ef7a24..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/BlockIndexStore.java
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.AbstractIndex;
-import org.carbondata.core.carbon.datastore.block.BlockIndex;
-import org.carbondata.core.carbon.datastore.block.TableBlockInfo;
-import org.carbondata.core.carbon.datastore.exception.IndexBuilderException;
-import org.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.util.CarbonProperties;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.core.util.CarbonUtilException;
-
-/**
- * Singleton class to handle loading, unloading, clearing and storing of the
- * table blocks
- */
-public class BlockIndexStore {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(BlockIndexStore.class.getName());
-  /**
-   * singleton instance
-   */
-  private static final BlockIndexStore CARBONTABLEBLOCKSINSTANCE = new BlockIndexStore();
-
-  /**
-   * map to hold the table and its list of blocks
-   */
-  private Map<AbsoluteTableIdentifier, Map<TableBlockInfo, AbstractIndex>> tableBlocksMap;
-
-  /**
-   * map of block info to lock object; while loading the btree this will be filled
-   * and the entry removed after loading the tree for that particular block info.
-   * This is useful while loading trees concurrently: only a block level lock is
-   * applied, so another block can be loaded concurrently
-   */
-  private Map<TableBlockInfo, Object> blockInfoLock;
-
-  /**
-   * table to lock object map; this will be useful in a concurrent
-   * query scenario when more than one query comes for the same table, in which
-   * case it will ensure that only one query will be able to load the blocks
-   */
-  private Map<AbsoluteTableIdentifier, Object> tableLockMap;
-
-  private BlockIndexStore() {
-    tableBlocksMap =
-        new ConcurrentHashMap<AbsoluteTableIdentifier, Map<TableBlockInfo, AbstractIndex>>(
-            CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    tableLockMap = new ConcurrentHashMap<AbsoluteTableIdentifier, Object>(
-        CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    blockInfoLock = new ConcurrentHashMap<TableBlockInfo, Object>();
-  }
-
-  /**
-   * Return the instance of this class
-   *
-   * @return singleton instance
-   */
-  public static BlockIndexStore getInstance() {
-    return CARBONTABLEBLOCKSINSTANCE;
-  }
-
-  /**
-   * Below method will be used to load the blocks which are not yet loaded and
-   * to get the already loaded blocks. If all the passed blocks are already
-   * loaded it will not load them again, else it will load them.
-   *
-   * @param tableBlocksInfos        list of blocks to be loaded
-   * @param absoluteTableIdentifier absolute Table Identifier to identify the table
-   * @throws IndexBuilderException
-   */
-  public List<AbstractIndex> loadAndGetBlocks(List<TableBlockInfo> tableBlocksInfos,
-      AbsoluteTableIdentifier absoluteTableIdentifier) throws IndexBuilderException {
-    AbstractIndex[] loadedBlock = new AbstractIndex[tableBlocksInfos.size()];
-    addTableLockObject(absoluteTableIdentifier);
-    // sort the block info
-    // so blocks will be loaded in sorted order, as this is required for
-    // query execution
-    Collections.sort(tableBlocksInfos);
-    // get the instance
-    Object lockObject = tableLockMap.get(absoluteTableIdentifier);
-    Map<TableBlockInfo, AbstractIndex> tableBlockMapTemp = null;
-    int numberOfCores = 1;
-    try {
-      numberOfCores = Integer.parseInt(CarbonProperties.getInstance()
-          .getProperty(CarbonCommonConstants.NUM_CORES,
-              CarbonCommonConstants.NUM_CORES_DEFAULT_VAL));
-    } catch (NumberFormatException e) {
-      numberOfCores = Integer.parseInt(CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
-    }
-    ExecutorService executor = Executors.newFixedThreadPool(numberOfCores);
-    // Acquire the lock to ensure only one query is loading the table blocks
-    // if same block is assigned to both the queries
-    synchronized (lockObject) {
-      tableBlockMapTemp = tableBlocksMap.get(absoluteTableIdentifier);
-      // if it is loading for first time
-      if (null == tableBlockMapTemp) {
-        tableBlockMapTemp = new ConcurrentHashMap<TableBlockInfo, AbstractIndex>();
-        tableBlocksMap.put(absoluteTableIdentifier, tableBlockMapTemp);
-      }
-    }
-    AbstractIndex tableBlock = null;
-    List<Future<AbstractIndex>> blocksList = new ArrayList<Future<AbstractIndex>>();
-    int counter = -1;
-    for (TableBlockInfo blockInfo : tableBlocksInfos) {
-      counter++;
-      // if table block is already loaded then do not load
-      // that block
-      tableBlock = tableBlockMapTemp.get(blockInfo);
-      // if block is not loaded
-      if (null == tableBlock) {
-        // check any lock object is present in
-        // block info lock map
-        Object blockInfoLockObject = blockInfoLock.get(blockInfo);
-        // if lock object is not present then acquire
-        // the lock in block info lock and add a lock object in the map for
-        // particular block info; a double checking mechanism is used so that
-        // in case of concurrent queries only one lock object will be added
-        // for the same block info
-        if (null == blockInfoLockObject) {
-          synchronized (blockInfoLock) {
-            // again checking the block info lock, to check whether lock object is present
-            // or not if now also not present then add a lock object
-            blockInfoLockObject = blockInfoLock.get(blockInfo);
-            if (null == blockInfoLockObject) {
-              blockInfoLockObject = new Object();
-              blockInfoLock.put(blockInfo, blockInfoLockObject);
-            }
-          }
-        }
-        //acquire the lock for particular block info
-        synchronized (blockInfoLockObject) {
-          // check again whether block is present or not to avoid the
-          // same block is loaded
-          //more than once in case of concurrent query
-          tableBlock = tableBlockMapTemp.get(blockInfo);
-          // if still block is not present then load the block
-          if (null == tableBlock) {
-            blocksList.add(executor.submit(new BlockLoaderThread(blockInfo, tableBlockMapTemp)));
-          }
-        }
-      } else {
-        // if blocks is already loaded then directly set the block at particular position
-        //so block will be present in sorted order
-        loadedBlock[counter] = tableBlock;
-      }
-    }
-    // shut down the executor gracefully and wait until all the tasks are finished
-    executor.shutdown();
-    try {
-      executor.awaitTermination(1, TimeUnit.HOURS);
-    } catch (InterruptedException e) {
-      throw new IndexBuilderException(e);
-    }
-    // fill the block which were not loaded before to loaded blocks array
-    fillLoadedBlocks(loadedBlock, blocksList);
-    return Arrays.asList(loadedBlock);
-  }
-
-  /**
-   * Below method will be used to fill the loaded blocks to the array
-   * which will be used for query execution
-   *
-   * @param loadedBlockArray array of blocks which will be filled
-   * @param blocksList       blocks loaded in thread
-   * @throws IndexBuilderException in case of any failure
-   */
-  private void fillLoadedBlocks(AbstractIndex[] loadedBlockArray,
-      List<Future<AbstractIndex>> blocksList) throws IndexBuilderException {
-    int blockCounter = 0;
-    for (int i = 0; i < loadedBlockArray.length; i++) {
-      if (null == loadedBlockArray[i]) {
-        try {
-          loadedBlockArray[i] = blocksList.get(blockCounter++).get();
-        } catch (InterruptedException | ExecutionException e) {
-          throw new IndexBuilderException(e);
-        }
-      }
-
-    }
-  }
-
-  private AbstractIndex loadBlock(Map<TableBlockInfo, AbstractIndex> tableBlockMapTemp,
-      TableBlockInfo blockInfo) throws CarbonUtilException {
-    AbstractIndex tableBlock;
-    DataFileFooter footer;
-    // getting the data file meta data of the block
-    footer = CarbonUtil.readMetadatFile(blockInfo.getFilePath(), blockInfo.getBlockOffset(),
-        blockInfo.getBlockLength());
-    tableBlock = new BlockIndex();
-    footer.setTableBlockInfo(blockInfo);
-    // building the block
-    tableBlock.buildIndex(Arrays.asList(footer));
-    tableBlockMapTemp.put(blockInfo, tableBlock);
-    // finally remove the lock object from block info lock as once block is loaded
-    // it will not come inside this if condition
-    blockInfoLock.remove(blockInfo);
-    return tableBlock;
-  }
-
-  /**
-   * Method to add table level lock if lock is not present for the table
-   *
-   * @param absoluteTableIdentifier
-   */
-  private synchronized void addTableLockObject(AbsoluteTableIdentifier absoluteTableIdentifier) {
-    // add the instance to lock map if it is not present
-    if (null == tableLockMap.get(absoluteTableIdentifier)) {
-      tableLockMap.put(absoluteTableIdentifier, new Object());
-    }
-  }
-
-  /**
-   * This will be used to remove particular blocks, which is useful when some
-   * of the blocks are deleted, e.g. for retention or some other scenario
-   *
-   * @param removeTableBlocksInfos  blocks to be removed
-   * @param absoluteTableIdentifier absolute table identifier
-   */
-  public void removeTableBlocks(List<TableBlockInfo> removeTableBlocksInfos,
-      AbsoluteTableIdentifier absoluteTableIdentifier) {
-    // get the lock object if lock object is not present then it is not
-    // loaded at all
-    // we can return from here
-    Object lockObject = tableLockMap.get(absoluteTableIdentifier);
-    if (null == lockObject) {
-      return;
-    }
-    Map<TableBlockInfo, AbstractIndex> map = tableBlocksMap.get(absoluteTableIdentifier);
-    // if there is no loaded blocks then return
-    if (null == map) {
-      return;
-    }
-    for (TableBlockInfo blockInfos : removeTableBlocksInfos) {
-      map.remove(blockInfos);
-    }
-  }
-
-  /**
-   * remove all the details of a table this will be used in case of drop table
-   *
-   * @param absoluteTableIdentifier absolute table identifier to find the table
-   */
-  public void clear(AbsoluteTableIdentifier absoluteTableIdentifier) {
-    // removing all the details of table
-    tableLockMap.remove(absoluteTableIdentifier);
-    tableBlocksMap.remove(absoluteTableIdentifier);
-  }
-
-  /**
-   * Thread class which will be used to load the blocks
-   */
-  private class BlockLoaderThread implements Callable<AbstractIndex> {
-    /**
-     * table block info to block index map
-     */
-    private Map<TableBlockInfo, AbstractIndex> tableBlockMap;
-
-    // block info
-    private TableBlockInfo blockInfo;
-
-    private BlockLoaderThread(TableBlockInfo blockInfo,
-        Map<TableBlockInfo, AbstractIndex> tableBlockMap) {
-      this.tableBlockMap = tableBlockMap;
-      this.blockInfo = blockInfo;
-    }
-
-    @Override public AbstractIndex call() throws Exception {
-      // load and return the loaded blocks
-      return loadBlock(tableBlockMap, blockInfo);
-    }
-  }
-}
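
The per-block synchronization in loadAndGetBlocks above is a double-checked
pattern: check the cache, publish one lock object per key under a shared
lock, then re-check under the per-key lock before loading. A standalone
sketch of the same pattern with hypothetical names, not CarbonData API:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    public class PerKeyLoaderSketch<K, V> {
      private final Map<K, V> cache = new ConcurrentHashMap<K, V>();
      private final Map<K, Object> keyLocks = new ConcurrentHashMap<K, Object>();

      public V loadIfAbsent(K key, Function<K, V> loader) {
        V value = cache.get(key);
        if (value != null) {
          return value;              // fast path: already loaded
        }
        // one lock object per key, so unrelated keys can load in parallel
        Object lock = keyLocks.computeIfAbsent(key, k -> new Object());
        synchronized (lock) {
          value = cache.get(key);    // re-check under the per-key lock
          if (value == null) {
            value = loader.apply(key);
            cache.put(key, value);
            keyLocks.remove(key);    // loaded entries never take this path again
          }
        }
        return value;
      }
    }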

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/BtreeBuilder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/BtreeBuilder.java b/core/src/main/java/org/carbondata/core/carbon/datastore/BtreeBuilder.java
deleted file mode 100644
index f6ea7d0..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/BtreeBuilder.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore;
-
-/**
- * Below interface will be used to build the index
- * in some data structure
- */
-public interface BtreeBuilder {
-
-  /**
-   * Below method will be used to store the leaf collection in some data structure
-   */
-  void build(BTreeBuilderInfo blocksBuilderInfos);
-
-  /**
-   * below method to get the first data block
-   *
-   * @return data block
-   */
-  DataRefNode get();
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/DataRefNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/DataRefNode.java b/core/src/main/java/org/carbondata/core/carbon/datastore/DataRefNode.java
deleted file mode 100644
index 4bb551f..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/DataRefNode.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore;
-
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
-import org.carbondata.core.datastorage.store.FileHolder;
-
-/**
- * Interface data block reference
- */
-public interface DataRefNode {
-
-  /**
-   * Method to get the next block; this can be used while scanning, so an
-   * iterator over this class can be used to iterate over blocks
-   *
-   * @return next block
-   */
-  DataRefNode getNextDataRefNode();
-
-  /**
-   * to get the number of key tuples present in the block
-   *
-   * @return number of keys in the block
-   */
-  int nodeSize();
-
-  /**
-   * Method can be used to get the block index. This is useful when multiple
-   * threads are used to scan a group of blocks: some of the blocks can be
-   * assigned to one thread and some to another
-   *
-   * @return block number
-   */
-  long nodeNumber();
-
-  /**
-   * This method will be used to get the max value of all the columns; this can
-   * be used in case of a filter query
-   *
-   * @return max value of all the columns
-   */
-  byte[][] getColumnsMaxValue();
-
-  /**
-   * This method will be used to get the min value of all the columns; this can
-   * be used in case of a filter query
-   *
-   * @return min value of all the columns
-   */
-  byte[][] getColumnsMinValue();
-
-  /**
-   * Below method will be used to get the dimension chunks
-   *
-   * @param fileReader   file reader to read the chunks from file
-   * @param blockIndexes indexes of the blocks need to be read
-   * @return dimension data chunks
-   */
-  DimensionColumnDataChunk[] getDimensionChunks(FileHolder fileReader, int[] blockIndexes);
-
-  /**
-   * Below method will be used to get the dimension chunk
-   *
-   * @param fileReader file reader to read the chunk from file
-   * @param blockIndex block index to be read
-   * @return dimension data chunk
-   */
-  DimensionColumnDataChunk getDimensionChunk(FileHolder fileReader, int blockIndex);
-
-  /**
-   * Below method will be used to get the measure chunk
-   *
-   * @param fileReader   file reader to read the chunk from file
-   * @param blockIndexes block indexes to be read from file
-   * @return measure column data chunk
-   */
-  MeasureColumnDataChunk[] getMeasureChunks(FileHolder fileReader, int[] blockIndexes);
-
-  /**
-   * Below method will be used to read the measure chunk
-   *
-   * @param fileReader file read to read the file chunk
-   * @param blockIndex block index to be read from file
-   * @return measure data chunk
-   */
-  MeasureColumnDataChunk getMeasureChunk(FileHolder fileReader, int blockIndex);
-}
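
getNextDataRefNode() links the blocks into a forward-only chain, so a full
scan reduces to a while loop from a starting node. A minimal sketch that
compiles against the interface above; it assumes the starting node comes
from a btree lookup and that the chain ends with null:

    import org.carbondata.core.carbon.datastore.DataRefNode;

    public final class BlockChainScanSketch {
      // walk the forward-linked chain of blocks and count the key tuples
      static long countRows(DataRefNode startNode) {
        long totalRows = 0;
        DataRefNode current = startNode;
        while (current != null) {
          totalRows += current.nodeSize();        // key tuples in this block
          current = current.getNextDataRefNode(); // assumed null after the last block
        }
        return totalRows;
      }
    }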

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/DataRefNodeFinder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/DataRefNodeFinder.java b/core/src/main/java/org/carbondata/core/carbon/datastore/DataRefNodeFinder.java
deleted file mode 100644
index a4027cf..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/DataRefNodeFinder.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore;
-
-/**
- * Below Interface is to search a block
- */
-public interface DataRefNodeFinder {
-
-  /**
-   * Below method will be used to get the first tentative block which matches with
-   * the search key
-   *
-   * @param dataBlocks complete data blocks present
-   * @param searchKey  key to be searched
-   * @return data block
-   */
-  DataRefNode findFirstDataBlock(DataRefNode dataBlocks, IndexKey searchKey);
-
-  /**
-   * Below method will be used to get the last tentative block which matches with
-   * the search key
-   *
-   * @param dataBlocks complete data blocks present
-   * @param searchKey  key to be searched
-   * @return data block
-   */
-  DataRefNode findLastDataBlock(DataRefNode dataBlocks, IndexKey searchKey);
-}
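
findFirstDataBlock and findLastDataBlock bound the range of blocks that can
contain keys between a start and an end key, so a filter scan only needs to
walk that range. A sketch of that usage, assuming the IndexKeys are built
elsewhere and that blocks are chained in nodeNumber order:

    import java.util.ArrayList;
    import java.util.List;

    import org.carbondata.core.carbon.datastore.DataRefNode;
    import org.carbondata.core.carbon.datastore.DataRefNodeFinder;
    import org.carbondata.core.carbon.datastore.IndexKey;

    public final class BlockRangeSketch {
      // collect the blocks between the first and last tentative matches
      static List<DataRefNode> blocksForRange(DataRefNodeFinder finder,
          DataRefNode root, IndexKey startKey, IndexKey endKey) {
        List<DataRefNode> range = new ArrayList<DataRefNode>();
        DataRefNode first = finder.findFirstDataBlock(root, startKey);
        DataRefNode last = finder.findLastDataBlock(root, endKey);
        if (first == null || last == null) {
          return range;   // assumption: lookups may miss on an empty tree
        }
        DataRefNode current = first;
        while (current != null && current.nodeNumber() <= last.nodeNumber()) {
          range.add(current);
          current = current.getNextDataRefNode();
        }
        return range;
      }
    }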

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/IndexKey.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/IndexKey.java b/core/src/main/java/org/carbondata/core/carbon/datastore/IndexKey.java
deleted file mode 100644
index 49d443c..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/IndexKey.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore;
-
-/**
- * Index class to store the index of the segment blocklet infos
- */
-public class IndexKey {
-
-  /**
-   * key which is generated from key generator
-   */
-  private byte[] dictionaryKeys;
-
-  /**
-   * key which was not generated using key generator
-   * <Index of FirstKey (2 bytes)><Index of SecondKey (2 bytes)><Index of NKey (2 bytes)>
-   * <First Key ByteArray><2nd Key ByteArray><N Key ByteArray>
-   */
-  private byte[] noDictionaryKeys;
-
-  public IndexKey(byte[] dictionaryKeys, byte[] noDictionaryKeys) {
-    this.dictionaryKeys = dictionaryKeys;
-    this.noDictionaryKeys = noDictionaryKeys;
-    if (null == dictionaryKeys) {
-      this.dictionaryKeys = new byte[0];
-    }
-    if (null == noDictionaryKeys) {
-      this.noDictionaryKeys = new byte[0];
-    }
-  }
-
-  /**
-   * @return the dictionaryKeys
-   */
-  public byte[] getDictionaryKeys() {
-    return dictionaryKeys;
-  }
-
-  /**
-   * @return the noDictionaryKeys
-   */
-  public byte[] getNoDictionaryKeys() {
-    return noDictionaryKeys;
-  }
-}
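
The noDictionaryKeys layout documented above is a 2-byte index per key
followed by the concatenated key byte arrays. A sketch of packing such a
buffer; treating the 2-byte values as offsets into the buffer is an
assumption here:

    import java.nio.ByteBuffer;

    public final class NoDictionaryKeySketch {
      // pack keys as: one 2-byte index per key, then the key byte arrays
      static byte[] pack(byte[][] keys) {
        int headerSize = 2 * keys.length;
        int dataSize = 0;
        for (byte[] key : keys) {
          dataSize += key.length;
        }
        ByteBuffer buffer = ByteBuffer.allocate(headerSize + dataSize);
        int offset = headerSize;
        for (byte[] key : keys) {
          buffer.putShort((short) offset);  // where this key's bytes start
          offset += key.length;
        }
        for (byte[] key : keys) {
          buffer.put(key);
        }
        return buffer.array();
      }
    }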

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/SegmentTaskIndexStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/SegmentTaskIndexStore.java b/core/src/main/java/org/carbondata/core/carbon/datastore/SegmentTaskIndexStore.java
deleted file mode 100644
index c94a100..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/SegmentTaskIndexStore.java
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.AbstractIndex;
-import org.carbondata.core.carbon.datastore.block.SegmentTaskIndex;
-import org.carbondata.core.carbon.datastore.block.TableBlockInfo;
-import org.carbondata.core.carbon.datastore.exception.IndexBuilderException;
-import org.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
-import org.carbondata.core.carbon.path.CarbonTablePath.DataFileUtil;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.core.util.CarbonUtilException;
-
-/**
- * Singleton class to handle loading, unloading, clearing and storing of the
- * table segment task indexes
- */
-public class SegmentTaskIndexStore {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(SegmentTaskIndexStore.class.getName());
-  /**
-   * singleton instance
-   */
-  private static final SegmentTaskIndexStore SEGMENTTASKINDEXSTORE = new SegmentTaskIndexStore();
-
-  /**
-   * mapping of table identifier to map of segmentId_taskId to table segment.
-   * The reason for so many maps is that each segment can have multiple data
-   * files and each file will have its own btree
-   */
-  private Map<AbsoluteTableIdentifier, Map<String, Map<String, AbstractIndex>>> tableSegmentMap;
-
-  /**
-   * map of segment id to lock object; while loading the btree this will be filled
-   * and the entry removed after loading the tree for that particular segment.
-   * This is useful while loading trees concurrently: only a segment level lock is
-   * applied, so another segment can be loaded concurrently
-   */
-  private Map<String, Object> segmentLockMap;
-
-  /**
-   * table to lock object map; this will be useful in a concurrent
-   * query scenario when more than one query comes for the same table, in which
-   * case it will ensure that only one query will be able to load the blocks
-   */
-  private Map<AbsoluteTableIdentifier, Object> tableLockMap;
-
-  private SegmentTaskIndexStore() {
-    tableSegmentMap =
-        new ConcurrentHashMap<AbsoluteTableIdentifier, Map<String, Map<String, AbstractIndex>>>(
-            CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    tableLockMap = new ConcurrentHashMap<AbsoluteTableIdentifier, Object>(
-        CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    segmentLockMap = new ConcurrentHashMap<String, Object>();
-  }
-
-  /**
-   * Return the instance of this class
-   *
-   * @return singleton instance
-   */
-  public static SegmentTaskIndexStore getInstance() {
-    return SEGMENTTASKINDEXSTORE;
-  }
-
-  /**
-   * Below method will be used to load the segments.
-   * One segment may have multiple tasks, so the table segment will be loaded
-   * based on task id, and this will return the map of taskId to table
-   * segment
-   *
-   * @param segmentToTableBlocksInfos segment id to block info
-   * @param absoluteTableIdentifier   absolute table identifier
-   * @return map of task id to segment mapping
-   * @throws IndexBuilderException
-   */
-  public Map<String, AbstractIndex> loadAndGetTaskIdToSegmentsMap(
-      Map<String, List<TableBlockInfo>> segmentToTableBlocksInfos,
-      AbsoluteTableIdentifier absoluteTableIdentifier) throws IndexBuilderException {
-    // task id to segment map
-    Map<String, AbstractIndex> taskIdToTableSegmentMap =
-        new HashMap<String, AbstractIndex>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    addLockObject(absoluteTableIdentifier);
-    Iterator<Entry<String, List<TableBlockInfo>>> iteratorOverSegmentBlocksInfos =
-        segmentToTableBlocksInfos.entrySet().iterator();
-    Map<String, Map<String, AbstractIndex>> tableSegmentMapTemp =
-        addTableSegmentMap(absoluteTableIdentifier);
-    Map<String, AbstractIndex> taskIdToSegmentIndexMap = null;
-    String segmentId = null;
-    String taskId = null;
-    try {
-      while (iteratorOverSegmentBlocksInfos.hasNext()) {
-        // segment id to table block mapping
-        Entry<String, List<TableBlockInfo>> next = iteratorOverSegmentBlocksInfos.next();
-        // group task id to table block info mapping for the segment
-        Map<String, List<TableBlockInfo>> taskIdToTableBlockInfoMap =
-            mappedAndGetTaskIdToTableBlockInfo(segmentToTableBlocksInfos);
-        // get the existing map of task id to table segment map
-        segmentId = next.getKey();
-        // check if segment is already loaded, if segment is already loaded
-        //no need to load the segment block
-        taskIdToSegmentIndexMap = tableSegmentMapTemp.get(segmentId);
-        if (taskIdToSegmentIndexMap == null) {
-          // get the segment loader lock object this is to avoid
-          // same segment is getting loaded multiple times
-          // in case of concurrent query
-          Object segmentLoderLockObject = segmentLockMap.get(segmentId);
-          if (null == segmentLoderLockObject) {
-            segmentLoderLockObject = addAndGetSegmentLock(segmentId);
-          }
-          // acquire lock to load the segment
-          synchronized (segmentLoderLockObject) {
-            taskIdToSegmentIndexMap = tableSegmentMapTemp.get(segmentId);
-            if (null == taskIdToSegmentIndexMap) {
-              // creating a map of task id to table segment
-              taskIdToSegmentIndexMap = new HashMap<String, AbstractIndex>();
-              Iterator<Entry<String, List<TableBlockInfo>>> iterator =
-                  taskIdToTableBlockInfoMap.entrySet().iterator();
-              while (iterator.hasNext()) {
-                Entry<String, List<TableBlockInfo>> taskToBlockInfoList = iterator.next();
-                taskId = taskToBlockInfoList.getKey();
-                taskIdToSegmentIndexMap.put(taskId,
-                    loadBlocks(taskId, taskToBlockInfoList.getValue(), absoluteTableIdentifier));
-              }
-              tableSegmentMapTemp.put(next.getKey(), taskIdToSegmentIndexMap);
-              // removing from segment lock map, as once the segment is loaded
-              // a concurrent query coming for the same segment will wait on
-              // the lock; after this point the segment is already loaded, so
-              // the lock is no longer required, which is why the lock object
-              // is removed
-              segmentLockMap.remove(segmentId);
-            }
-          }
-          taskIdToTableSegmentMap.putAll(taskIdToSegmentIndexMap);
-        }
-      }
-    } catch (CarbonUtilException e) {
-      LOGGER.error("Problem while loading the segment");
-      throw new IndexBuilderException(e);
-    }
-    return taskIdToTableSegmentMap;
-  }
-
-  /**
-   * Below method will be used to get the segment level lock object
-   *
-   * @param segmentId
-   * @return lock object
-   */
-  private synchronized Object addAndGetSegmentLock(String segmentId) {
-    // get the segment lock object if it is present then return
-    // otherwise add the new lock and return
-    Object segmentLoderLockObject = segmentLockMap.get(segmentId);
-    if (null == segmentLoderLockObject) {
-      segmentLoderLockObject = new Object();
-      segmentLockMap.put(segmentId, segmentLoderLockObject);
-    }
-    return segmentLoderLockObject;
-  }
-
-  /**
-   * Below method adds a table level lock object to the lock map
-   * if it is not already present
-   *
-   * @param absoluteTableIdentifier
-   */
-  private synchronized void addLockObject(AbsoluteTableIdentifier absoluteTableIdentifier) {
-    // add the instance to lock map if it is not present
-    if (null == tableLockMap.get(absoluteTableIdentifier)) {
-      tableLockMap.put(absoluteTableIdentifier, new Object());
-    }
-  }
-
-  /**
-   * Below method will be used to get the table segment map
-   * if table segment is not present then it will add and return
-   *
-   * @param absoluteTableIdentifier
-   * @return table segment map
-   */
-  private Map<String, Map<String, AbstractIndex>> addTableSegmentMap(
-      AbsoluteTableIdentifier absoluteTableIdentifier) {
-    // get the instance of lock object
-    Object lockObject = tableLockMap.get(absoluteTableIdentifier);
-    Map<String, Map<String, AbstractIndex>> tableSegmentMapTemp =
-        tableSegmentMap.get(absoluteTableIdentifier);
-    if (null == tableSegmentMapTemp) {
-      synchronized (lockObject) {
-        // segment id to task id to table segment map
-        tableSegmentMapTemp = tableSegmentMap.get(absoluteTableIdentifier);
-        if (null == tableSegmentMapTemp) {
-          tableSegmentMapTemp = new ConcurrentHashMap<String, Map<String, AbstractIndex>>();
-          tableSegmentMap.put(absoluteTableIdentifier, tableSegmentMapTemp);
-        }
-      }
-    }
-    return tableSegmentMapTemp;
-  }
-
-  /**
-   * Below method will be used to load the blocks
-   *
-   * @param tableBlockInfoList
-   * @return loaded segment
-   * @throws CarbonUtilException
-   */
-  private AbstractIndex loadBlocks(String taskId, List<TableBlockInfo> tableBlockInfoList,
-      AbsoluteTableIdentifier tableIdentifier) throws CarbonUtilException {
-    // all the blocks of one task id will be loaded together,
-    // so creating a list which will have all the data file metadata of one task
-    List<DataFileFooter> footerList =
-        CarbonUtil.readCarbonIndexFile(taskId, tableBlockInfoList, tableIdentifier);
-    AbstractIndex segment = new SegmentTaskIndex();
-    // file path of only the first block is passed, as the table block info
-    // paths of the same task id will all be the same
-    segment.buildIndex(footerList);
-    return segment;
-  }
-
-  /**
-   * Below method will be used to get the task id to all the table block info belongs to
-   * that task id mapping
-   *
-   * @param segmentToTableBlocksInfos segment id to table blocks info map
-   * @return task id to table block info mapping
-   */
-  private Map<String, List<TableBlockInfo>> mappedAndGetTaskIdToTableBlockInfo(
-      Map<String, List<TableBlockInfo>> segmentToTableBlocksInfos) {
-    Map<String, List<TableBlockInfo>> taskIdToTableBlockInfoMap =
-        new HashMap<String, List<TableBlockInfo>>();
-    Iterator<Entry<String, List<TableBlockInfo>>> iterator =
-        segmentToTableBlocksInfos.entrySet().iterator();
-    while (iterator.hasNext()) {
-      Entry<String, List<TableBlockInfo>> next = iterator.next();
-      List<TableBlockInfo> value = next.getValue();
-      for (TableBlockInfo blockInfo : value) {
-        String taskNo = DataFileUtil.getTaskNo(blockInfo.getFilePath());
-        List<TableBlockInfo> list = taskIdToTableBlockInfoMap.get(taskNo);
-        if (null == list) {
-          list = new ArrayList<TableBlockInfo>();
-          taskIdToTableBlockInfoMap.put(taskNo, list);
-        }
-        list.add(blockInfo);
-      }
-
-    }
-    return taskIdToTableBlockInfoMap;
-  }
-
-  /**
-   * remove all the details of a table this will be used in case of drop table
-   *
-   * @param absoluteTableIdentifier absolute table identifier to find the table
-   */
-  public void clear(AbsoluteTableIdentifier absoluteTableIdentifier) {
-    // removing all the details of table
-    tableLockMap.remove(absoluteTableIdentifier);
-    tableSegmentMap.remove(absoluteTableIdentifier);
-  }
-
-  /**
-   * Below method will be used to remove the segment block based on
-   * segment id is passed
-   *
-   * @param segmentToBeRemoved      segment to be removed
-   * @param absoluteTableIdentifier absoluteTableIdentifier
-   */
-  public void removeTableBlocks(List<String> segmentToBeRemoved,
-      AbsoluteTableIdentifier absoluteTableIdentifier) {
-    // get the lock object if lock object is not present then it is not
-    // loaded at all
-    // we can return from here
-    Object lockObject = tableLockMap.get(absoluteTableIdentifier);
-    if (null == lockObject) {
-      return;
-    }
-    // Acquire the lock and remove only those instance which was loaded
-    Map<String, Map<String, AbstractIndex>> map = tableSegmentMap.get(absoluteTableIdentifier);
-    // if there is no loaded blocks then return
-    if (null == map) {
-      return;
-    }
-    for (String segmentId : segmentToBeRemoved) {
-      map.remove(segmentId);
-    }
-  }
-
-  /**
-   * Below method will be used to check whether the segment blocks
-   * are already loaded or not
-   *
-   * @param absoluteTableIdentifier
-   * @param segmentId
-   * @return the loaded blocks if already loaded, otherwise null
-   */
-  public Map<String, AbstractIndex> getSegmentBTreeIfExists(
-      AbsoluteTableIdentifier absoluteTableIdentifier, String segmentId) {
-    Map<String, Map<String, AbstractIndex>> tableSegment =
-        tableSegmentMap.get(absoluteTableIdentifier);
-    if (null == tableSegment) {
-      return null;
-    }
-    return tableSegment.get(segmentId);
-  }
-}
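
mappedAndGetTaskIdToTableBlockInfo above is a plain group-by over the block
file paths. The same idiom written against strings, with a hypothetical
parser standing in for DataFileUtil.getTaskNo:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public final class TaskGroupingSketch {
      // bucket file paths by their parsed task number
      static Map<String, List<String>> groupByTask(List<String> filePaths) {
        Map<String, List<String>> taskToPaths = new HashMap<String, List<String>>();
        for (String path : filePaths) {
          String taskNo = parseTaskNo(path);
          List<String> bucket = taskToPaths.get(taskNo);
          if (bucket == null) {
            bucket = new ArrayList<String>();
            taskToPaths.put(taskNo, bucket);
          }
          bucket.add(path);
        }
        return taskToPaths;
      }

      // hypothetical parser: assumes the task number is the segment between
      // the last '-' and the file extension, which need not match CarbonData's
      // actual file name scheme
      private static String parseTaskNo(String path) {
        int dash = path.lastIndexOf('-');
        int dot = path.lastIndexOf('.');
        return dot > dash ? path.substring(dash + 1, dot) : path.substring(dash + 1);
      }
    }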

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/block/AbstractIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/block/AbstractIndex.java b/core/src/main/java/org/carbondata/core/carbon/datastore/block/AbstractIndex.java
deleted file mode 100644
index 548522d..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/block/AbstractIndex.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.block;
-
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.DataRefNode;
-import org.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
-
-public abstract class AbstractIndex {
-
-  /**
-   * VO (value object) class which will hold the RS information of the block
-   */
-  protected SegmentProperties segmentProperties;
-
-  /**
-   * data block
-   */
-  protected DataRefNode dataRefNode;
-
-  /**
-   * total number of row present in the block
-   */
-  protected long totalNumberOfRows;
-
-  /**
-   * @return the totalNumberOfRows
-   */
-  public long getTotalNumberOfRows() {
-    return totalNumberOfRows;
-  }
-
-  /**
-   * @return the segmentProperties
-   */
-  public SegmentProperties getSegmentProperties() {
-    return segmentProperties;
-  }
-
-  /**
-   * @return the dataBlock
-   */
-  public DataRefNode getDataRefNode() {
-    return dataRefNode;
-  }
-
-  /**
-   * Below method will be used to load the data block
-   *
-   * @param footerList footer details of the blocks
-   */
-  public abstract void buildIndex(List<DataFileFooter> footerList);
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/block/BlockIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/block/BlockIndex.java b/core/src/main/java/org/carbondata/core/carbon/datastore/block/BlockIndex.java
deleted file mode 100644
index 3ace21d..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/block/BlockIndex.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.block;
-
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.BTreeBuilderInfo;
-import org.carbondata.core.carbon.datastore.BtreeBuilder;
-import org.carbondata.core.carbon.datastore.impl.btree.BlockletBTreeBuilder;
-import org.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
-
-/**
- * Class which is responsible for loading the b+ tree block. This class will
- * persist all the details of a table block
- */
-public class BlockIndex extends AbstractIndex {
-
-  /**
-   * Below method will be used to load the data block
-   *
-   * @param footerList footer details of the blocks
-   */
-  public void buildIndex(List<DataFileFooter> footerList) {
-    // create a metadata details
-    // this will be useful in query handling
-    segmentProperties = new SegmentProperties(footerList.get(0).getColumnInTable(),
-        footerList.get(0).getSegmentInfo().getColumnCardinality());
-    // create a segment builder info
-    BTreeBuilderInfo indexBuilderInfo =
-        new BTreeBuilderInfo(footerList, segmentProperties.getDimensionColumnsValueSize());
-    BtreeBuilder blocksBuilder = new BlockletBTreeBuilder();
-    // load the metadata
-    blocksBuilder.build(indexBuilderInfo);
-    dataRefNode = blocksBuilder.get();
-    totalNumberOfRows = footerList.get(0).getNumberOfRows();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/block/Distributable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/block/Distributable.java b/core/src/main/java/org/carbondata/core/carbon/datastore/block/Distributable.java
deleted file mode 100644
index 977ee3a..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/block/Distributable.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.carbondata.core.carbon.datastore.block;
-
-/**
- * Abstract class which maintains the locations of a node.
- */
-public abstract class Distributable implements Comparable<Distributable> {
-
-  public abstract String[] getLocations();
-}


[21/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/measure/AbstractMeasureChunkReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/measure/AbstractMeasureChunkReader.java b/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/measure/AbstractMeasureChunkReader.java
deleted file mode 100644
index a92539e..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/measure/AbstractMeasureChunkReader.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.chunk.reader.measure;
-
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.chunk.reader.MeasureColumnChunkReader;
-import org.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
-
-/**
- * Measure block reader abstract class
- */
-public abstract class AbstractMeasureChunkReader implements MeasureColumnChunkReader {
-
-  /**
-   * metadata which is used to compress and uncompress the measure value
-   */
-  protected ValueCompressionModel compressionModel;
-
-  /**
-   * file path from which blocks will be read
-   */
-  protected String filePath;
-
-  /**
-   * measure chunk have the information about the metadata present in the file
-   */
-  protected List<DataChunk> measureColumnChunk;
-
-  /**
-   * type of value compression model selected for each measure column
-   */
-  protected UnCompressValue[] values;
-
-  /**
-   * Constructor to get minimum parameter to create instance of this class
-   *
-   * @param measureColumnChunk measure chunk metadata
-   * @param compressionModel   metadata which is used to compress and uncompress
-   *                           the measure value
-   * @param filePath           file from which data will be read
-   * @param isInMemory         in case of an in-memory store it will read and hold
-   *                           the data, and uncompress it when a query request comes
-   */
-  public AbstractMeasureChunkReader(List<DataChunk> measureColumnChunk,
-      ValueCompressionModel compressionModel, String filePath, boolean isInMemory) {
-    this.measureColumnChunk = measureColumnChunk;
-    this.compressionModel = compressionModel;
-    this.filePath = filePath;
-    values =
-        new ValueCompressonHolder.UnCompressValue[compressionModel.getUnCompressValues().length];
-    for (int i = 0; i < values.length; i++) {
-      values[i] = compressionModel.getUnCompressValues()[i].getNew().getCompressorObject();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/measure/CompressedMeasureChunkFileBasedReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/measure/CompressedMeasureChunkFileBasedReader.java b/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/measure/CompressedMeasureChunkFileBasedReader.java
deleted file mode 100644
index d748cd9..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/chunk/reader/measure/CompressedMeasureChunkFileBasedReader.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.chunk.reader.measure;
-
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
-import org.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
-import org.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
-
-/**
- * Compressed measure chunk reader
- */
-public class CompressedMeasureChunkFileBasedReader extends AbstractMeasureChunkReader {
-
-  /**
-   * Constructor to get minimum parameter to create instance of this class
-   *
-   * @param measureColumnChunk measure chunk metadata
-   * @param compressionModel   metadata which was used to compress and uncompress
-   *                           the measure values
-   * @param filePath           file from which data will be read
-   */
-  public CompressedMeasureChunkFileBasedReader(List<DataChunk> measureColumnChunk,
-      ValueCompressionModel compressionModel, String filePath) {
-    super(measureColumnChunk, compressionModel, filePath, false);
-  }
-
-  /**
-   * Method to read the blocks data based on block indexes
-   *
-   * @param fileReader   file reader to read the blocks
-   * @param blockIndexes blocks to be read
-   * @return measure data chunks
-   */
-  @Override public MeasureColumnDataChunk[] readMeasureChunks(FileHolder fileReader,
-      int... blockIndexes) {
-    MeasureColumnDataChunk[] datChunk = new MeasureColumnDataChunk[values.length];
-    for (int i = 0; i < blockIndexes.length; i++) {
-      datChunk[blockIndexes[i]] = readMeasureChunk(fileReader, blockIndexes[i]);
-    }
-    return datChunk;
-  }
-
-  /**
-   * Method to read the blocks data based on block index
-   *
-   * @param fileReader file reader to read the blocks
-   * @param blockIndex block to be read
-   * @return measure data chunk
-   */
-  @Override public MeasureColumnDataChunk readMeasureChunk(FileHolder fileReader, int blockIndex) {
-    MeasureColumnDataChunk datChunk = new MeasureColumnDataChunk();
-    // create a new uncompressor
-    ValueCompressonHolder.UnCompressValue copy = values[blockIndex].getNew();
-    // read data from file and set to uncompressor
-    copy.setValue(fileReader
-        .readByteArray(filePath, measureColumnChunk.get(blockIndex).getDataPageOffset(),
-            measureColumnChunk.get(blockIndex).getDataPageLength()));
-    // get the data holder after uncompressing
-    CarbonReadDataHolder measureDataHolder =
-        copy.uncompress(compressionModel.getChangedDataType()[blockIndex])
-            .getValues(compressionModel.getDecimal()[blockIndex],
-                compressionModel.getMaxValue()[blockIndex]);
-    // set the data chunk
-    datChunk.setMeasureDataHolder(measureDataHolder);
-    // set the null value indexes
-    datChunk
-        .setNullValueIndexHolder(measureColumnChunk.get(blockIndex).getNullValueIndexForColumn());
-    return datChunk;
-  }
-
-}
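
For orientation, a minimal usage sketch of this reader (illustrative only, not part of this commit): it assumes the chunk metadata list, the ValueCompressionModel, and the file path were already extracted from the blocklet footer, and that CarbonReadDataHolder exposes the per-index double accessor used elsewhere in this codebase.

    // hypothetical driver code; inputs come from the blocklet footer
    MeasureColumnChunkReader reader = new CompressedMeasureChunkFileBasedReader(
        measureColumnChunkMetadata, compressionModel, filePath);
    FileHolder fileHolder = FileFactory.getFileHolder(FileFactory.getFileType(filePath));
    // read and uncompress the chunk of measure column 0
    MeasureColumnDataChunk chunk = reader.readMeasureChunk(fileHolder, 0);
    double firstValue = chunk.getMeasureDataHolder().getReadableDoubleValueByIndex(0);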

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/exception/IndexBuilderException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/exception/IndexBuilderException.java b/core/src/main/java/org/carbondata/core/carbon/datastore/exception/IndexBuilderException.java
deleted file mode 100644
index 5ca33e3..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/exception/IndexBuilderException.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.carbon.datastore.exception;
-
-import java.util.Locale;
-
-/**
- * Exception class for the index builder
- */
-public class IndexBuilderException extends Exception {
-  /**
-   * default serial version ID.
-   */
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * The Error message.
-   */
-  private String msg = "";
-
-  /**
-   * Constructor
-   *
-   * @param msg The error message for this exception.
-   */
-  public IndexBuilderException(String msg) {
-    super(msg);
-    this.msg = msg;
-  }
-
-  /**
-   * Constructor
-   *
-   * @param msg       exception message
-   * @param throwable detail exception
-   */
-  public IndexBuilderException(String msg, Throwable throwable) {
-    super(msg, throwable);
-    this.msg = msg;
-  }
-
-  /**
-   * Constructor
-   *
-   * @param throwable exception
-   */
-  public IndexBuilderException(Throwable throwable) {
-    super(throwable);
-  }
-
-  /**
-   * This method is used to get the localized message.
-   *
-   * @param locale - A Locale object represents a specific geographical,
-   *               political, or cultural region.
-   * @return - Localized error message.
-   */
-  public String getLocalizedMessage(Locale locale) {
-    return "";
-  }
-
-  /**
-   * getLocalizedMessage
-   */
-  @Override public String getLocalizedMessage() {
-    return super.getLocalizedMessage();
-  }
-
-  /**
-   * getMessage
-   */
-  public String getMessage() {
-    return this.msg;
-  }
-}
-
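
A short usage sketch of this exception type (hypothetical; loadBlockIndex is an assumed helper, not something from this commit):

    try {
      loadBlockIndex(); // assumed helper that may throw IOException
    } catch (IOException e) {
      // wrap the low-level failure with a builder-specific exception
      throw new IndexBuilderException("Failed to build the block index", e);
    }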

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeBuilder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeBuilder.java b/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeBuilder.java
deleted file mode 100644
index 88f03cc..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeBuilder.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.impl.btree;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.BtreeBuilder;
-import org.carbondata.core.carbon.datastore.IndexKey;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.util.CarbonProperties;
-
-/**
- * Abstract Btree based builder
- */
-public abstract class AbstractBTreeBuilder implements BtreeBuilder {
-
-  /**
-   * default number of entries in a non leaf node
-   */
-  private static final int DEFAULT_NUMBER_OF_ENTRIES_NONLEAF = 32;
-
-  /**
-   * Maximum number of entries in intermediate nodes
-   */
-  protected int maxNumberOfEntriesInNonLeafNodes;
-
-  /**
-   * Number of leaf nodes
-   */
-  protected int nLeaf;
-
-  /**
-   * root node of a btree
-   */
-  protected BTreeNode root;
-
-  public AbstractBTreeBuilder() {
-    maxNumberOfEntriesInNonLeafNodes = Integer.parseInt(CarbonProperties.getInstance()
-        .getProperty("com.huawei.datastore.internalnodesize",
-            DEFAULT_NUMBER_OF_ENTRIES_NONLEAF + ""));
-  }
-
-  /**
-   * Below method is to build the intermediate node of the btree
-   *
-   * @param curNode              current node
-   * @param childNodeGroups      children group which will have all the children for
-   *                             particular intermediate node
-   * @param currentGroup         current group
-   * @param interNSKeyList       list of keys
-   * @param numberOfInternalNode number of internal node
-   */
-  protected void addIntermediateNode(BTreeNode curNode, List<BTreeNode[]> childNodeGroups,
-      BTreeNode[] currentGroup, List<List<IndexKey>> interNSKeyList, int numberOfInternalNode) {
-
-    int groupCounter;
-    // Build internal nodes level by level. Each upper node can have
-    // upperMaxEntry keys and upperMaxEntry+1 children
-    int remainder;
-    int nHigh = numberOfInternalNode;
-    boolean bRootBuilt = false;
-    remainder = nLeaf % (maxNumberOfEntriesInNonLeafNodes);
-    List<IndexKey> interNSKeys = null;
-    while (nHigh > 1 || !bRootBuilt) {
-      List<BTreeNode[]> internalNodeGroups =
-          new ArrayList<BTreeNode[]>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-      List<List<IndexKey>> interNSKeyTmpList =
-          new ArrayList<List<IndexKey>>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-      numberOfInternalNode = 0;
-      for (int i = 0; i < nHigh; i++) {
-        // Create a new internal node
-        curNode = new BTreeNonLeafNode();
-        // Allocate a new node group if current node group is full
-        groupCounter = i % (maxNumberOfEntriesInNonLeafNodes);
-        if (groupCounter == 0) {
-          // Create new node group
-          currentGroup = new BTreeNonLeafNode[maxNumberOfEntriesInNonLeafNodes];
-          internalNodeGroups.add(currentGroup);
-          numberOfInternalNode++;
-          interNSKeys = new ArrayList<IndexKey>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-          interNSKeyTmpList.add(interNSKeys);
-        }
-
-        // Add the new internal node to current group
-        if (null != currentGroup) {
-          currentGroup[groupCounter] = curNode;
-        }
-        int nNodes;
-
-        if (i == nHigh - 1 && remainder != 0) {
-          nNodes = remainder;
-        } else {
-          nNodes = maxNumberOfEntriesInNonLeafNodes;
-        }
-        // Point the internal node to its children node group
-        curNode.setChildren(childNodeGroups.get(i));
-        // Fill the internal node with keys based on its child nodes
-        for (int j = 0; j < nNodes; j++) {
-          curNode.setKey(interNSKeyList.get(i).get(j));
-          if (j == 0 && null != interNSKeys) {
-            interNSKeys.add(interNSKeyList.get(i).get(j));
-
-          }
-        }
-      }
-      // If nHigh is 1, we have the root node
-      if (nHigh == 1) {
-        bRootBuilt = true;
-      }
-
-      remainder = nHigh % (maxNumberOfEntriesInNonLeafNodes);
-      nHigh = numberOfInternalNode;
-      childNodeGroups = internalNodeGroups;
-      interNSKeyList = interNSKeyTmpList;
-    }
-    root = curNode;
-  }
-
-  /**
-   * Below method is to convert the start key
-   * into fixed and variable length key.
-   * data format: <length><fixed length key><length><variable length key>
-   *
-   * @param startKey
-   * @return Index key
-   */
-  protected IndexKey convertStartKeyToNodeEntry(byte[] startKey) {
-    ByteBuffer buffer = ByteBuffer.wrap(startKey);
-    buffer.rewind();
-    int dictonaryKeySize = buffer.getInt();
-    int nonDictonaryKeySize = buffer.getInt();
-    byte[] dictionaryKey = new byte[dictonaryKeySize];
-    buffer.get(dictionaryKey);
-    byte[] nonDictionaryKey = new byte[nonDictonaryKeySize];
-    buffer.get(nonDictionaryKey);
-    IndexKey entry = new IndexKey(dictionaryKey, nonDictionaryKey);
-    return entry;
-  }
-
-  /**
-   * Below method will be used to get the first data block
-   * in Btree case it will be root node
-   */
-  @Override public BTreeNode get() {
-    return root;
-  }
-}
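
As a worked example of the start-key layout parsed by convertStartKeyToNodeEntry above (a sketch under the stated format, not code from this commit):

    // layout: <int: dictionary key length><int: no-dictionary key length>
    //         <dictionary key bytes><no-dictionary key bytes>
    byte[] dictionaryKey = new byte[] { 1, 2, 3 };
    byte[] nonDictionaryKey = new byte[] { 9, 9 };
    ByteBuffer buffer =
        ByteBuffer.allocate(8 + dictionaryKey.length + nonDictionaryKey.length);
    buffer.putInt(dictionaryKey.length);
    buffer.putInt(nonDictionaryKey.length);
    buffer.put(dictionaryKey);
    buffer.put(nonDictionaryKey);
    // convertStartKeyToNodeEntry(buffer.array()) now yields an IndexKey whose
    // dictionary part is {1, 2, 3} and whose no-dictionary part is {9, 9}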

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeLeafNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeLeafNode.java b/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeLeafNode.java
deleted file mode 100644
index b038afe..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/AbstractBTreeLeafNode.java
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.impl.btree;
-
-import org.carbondata.core.carbon.datastore.DataRefNode;
-import org.carbondata.core.carbon.datastore.IndexKey;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
-import org.carbondata.core.datastorage.store.FileHolder;
-
-/**
- * Abstract leaf node class of a btree
- */
-public abstract class AbstractBTreeLeafNode implements BTreeNode {
-
-  /**
-   * number of keys in a btree
-   */
-  protected int numberOfKeys;
-
-  /**
-   * node number
-   */
-  protected long nodeNumber;
-
-  /**
-   * Next node of the leaf
-   */
-  protected BTreeNode nextNode;
-
-  /**
-   * max key of the columns; this will be used to check whether this leaf will
-   * be used for scanning or not
-   */
-  protected byte[][] maxKeyOfColumns;
-
-  /**
-   * min key of the columns; this will be used to check whether this leaf will
-   * be used for scanning or not
-   */
-  protected byte[][] minKeyOfColumns;
-
-  /**
-   * Method to get the number of keys present in this node
-   *
-   * @return number of keys
-   */
-  @Override public int nodeSize() {
-    return this.numberOfKeys;
-  }
-
-  /**
-   * below method will be used to set the next node
-   *
-   * @param nextNode
-   */
-  @Override public void setNextNode(BTreeNode nextNode) {
-    this.nextNode = nextNode;
-  }
-
-  /**
-   * Below method is to get the children based on index
-   *
-   * @param index children index
-   * @return btree node
-   */
-  @Override public BTreeNode getChild(int index) {
-    throw new UnsupportedOperationException("Operation not supported in case of leaf node");
-  }
-
-  /**
-   * below method to set the node entry
-   *
-   * @param key node entry
-   */
-  @Override public void setKey(IndexKey key) {
-    throw new UnsupportedOperationException("Operation not supported in case of leaf node");
-  }
-
-  /**
-   * Method can be used to get the block index. This is useful when multiple
-   * threads scan a group of blocks, so that some of the blocks can be
-   * assigned to one thread and some to another
-   *
-   * @return block number
-   */
-  @Override public long nodeNumber() {
-    return nodeNumber;
-  }
-
-  /**
-   * This method will be used to get the max value of all the columns this can
-   * be used in case of filter query
-   *
-   * @return max value of all the columns
-   */
-  @Override public byte[][] getColumnsMaxValue() {
-    return maxKeyOfColumns;
-  }
-
-  /**
-   * This method will be used to get the min value of all the columns this can
-   * be used in case of filter query
-   *
-   * @return min value of all the columns
-   */
-  @Override public byte[][] getColumnsMinValue() {
-    return minKeyOfColumns;
-  }
-
-  /**
-   * to check whether node in a btree is a leaf node or not
-   *
-   * @return leaf node or not
-   */
-  @Override public boolean isLeafNode() {
-    return true;
-  }
-
-  /**
-   * Method to get the next block; this can be used while scanning, when an
-   * iterator of this class is used to iterate over blocks
-   *
-   * @return next block
-   */
-  @Override public DataRefNode getNextDataRefNode() {
-    return nextNode;
-  }
-
-  /**
-   * below method will return the key entries of one node
-   *
-   * @return node entry array
-   */
-  @Override public IndexKey[] getNodeKeys() {
-    // as this is a leaf node, this method implementation is not required
-    throw new UnsupportedOperationException("Operation not supported in case of leaf node");
-  }
-
-  /**
-   * below method will be used to set the children of intermediate node
-   *
-   * @param children array
-   */
-  @Override public void setChildren(BTreeNode[] children) {
-    // not required in case of a leaf node, as a leaf node will not have any children
-    throw new UnsupportedOperationException("Operation not supported in case of leaf node");
-  }
-
-  /**
-   * Below method will be used to get the dimension chunks
-   *
-   * @param fileReader   file reader to read the chunks from file
-   * @param blockIndexes indexes of the blocks need to be read
-   * @return dimension data chunks
-   */
-  @Override public DimensionColumnDataChunk[] getDimensionChunks(FileHolder fileReader,
-      int[] blockIndexes) {
-    // Not required here, as the leaf implementations extending this class
-    // provide their own way of getting the dimension chunks
-    return null;
-  }
-
-  /**
-   * Below method will be used to get the dimension chunk
-   *
-   * @param fileReader file reader to read the chunk from file
-   * @param blockIndex block index to be read
-   * @return dimension data chunk
-   */
-  @Override public DimensionColumnDataChunk getDimensionChunk(FileHolder fileReader,
-      int blockIndex) {
-    // Not required here, as the leaf implementations extending this class
-    // provide their own way of getting the dimension chunk
-    return null;
-  }
-
-  /**
-   * Below method will be used to get the measure chunk
-   *
-   * @param fileReader   file reader to read the chunk from file
-   * @param blockIndexes block indexes to be read from file
-   * @return measure column data chunk
-   */
-  @Override public MeasureColumnDataChunk[] getMeasureChunks(FileHolder fileReader,
-      int[] blockIndexes) {
-    // Not required here, as the leaf implementations extending this class
-    // provide their own way of getting the measure chunks
-    return null;
-  }
-
-  /**
-   * Below method will be used to read the measure chunk
-   *
-   * @param fileReader file reader to read the file chunk
-   * @param blockIndex block index to be read from file
-   * @return measure data chunk
-   */
-  @Override public MeasureColumnDataChunk getMeasureChunk(FileHolder fileReader, int blockIndex) {
-    // Not required here, as the leaf implementations extending this class
-    // provide their own way of getting the measure chunk
-    return null;
-  }
-}
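
Because setNextNode chains every leaf into a linked list, a full scan reduces to following getNextDataRefNode; a minimal traversal sketch (firstLeaf is assumed to have been obtained from a DataRefNodeFinder, not shown here):

    DataRefNode leaf = firstLeaf;
    long totalRows = 0;
    while (leaf != null) {
      // each leaf reports how many keys (rows) it holds
      totalRows += leaf.nodeSize();
      leaf = leaf.getNextDataRefNode();
    }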

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BTreeDataRefNodeFinder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BTreeDataRefNodeFinder.java b/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BTreeDataRefNodeFinder.java
deleted file mode 100644
index e60dad7..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BTreeDataRefNodeFinder.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.impl.btree;
-
-import java.nio.ByteBuffer;
-
-import org.carbondata.core.carbon.datastore.DataRefNode;
-import org.carbondata.core.carbon.datastore.DataRefNodeFinder;
-import org.carbondata.core.carbon.datastore.IndexKey;
-import org.carbondata.core.util.ByteUtil;
-
-/**
- * Below class will be used to find a block in a btree
- */
-public class BTreeDataRefNodeFinder implements DataRefNodeFinder {
-
-  /**
-   * a no dictionary column value is of variable length, so its entry in the
-   * column value size array will be -1
-   */
-  private static final int NO_DCITIONARY_COLUMN_VALUE = -1;
-
-  /**
-   * size of a short value in bytes
-   */
-  private static final short SHORT_SIZE_IN_BYTES = 2;
-  /**
-   * this holds the size of each value of a column and is used while comparing
-   * a btree node value with the search value: if the size is more than zero it
-   * is a fixed length column, else it is a variable length column. As the data
-   * of the two column types is stored separately, this value size array serves
-   * both purposes: comparison and jumping (deciding which type of value to
-   * compare next)
-   */
-  private int[] eachColumnValueSize;
-
-  /**
-   * this will be used during search for no dictionary column
-   */
-  private int numberOfNoDictionaryColumns;
-
-  public BTreeDataRefNodeFinder(int[] eachColumnValueSize) {
-    this.eachColumnValueSize = eachColumnValueSize;
-
-    for (int i = 0; i < eachColumnValueSize.length; i++) {
-      if (eachColumnValueSize[i] == -1) {
-        numberOfNoDictionaryColumns++;
-      }
-    }
-  }
-
-  /**
-   * Below method will be used to get the first tentative data block based on
-   * search key
-   *
-   * @param dataRefBlock complete data blocks present
-   * @param searchKey    key to be searched
-   * @return data block
-   */
-  @Override public DataRefNode findFirstDataBlock(DataRefNode dataRefBlock, IndexKey searchKey) {
-    // as its for btree type cast it to btree interface
-    BTreeNode rootNode = (BTreeNode) dataRefBlock;
-    while (!rootNode.isLeafNode()) {
-      rootNode = findFirstLeafNode(searchKey, rootNode);
-    }
-    return rootNode;
-  }
-
-  /**
-   * Below method will be used to get the last tentative data block based on
-   * search key
-   *
-   * @param dataRefBlock complete data blocks present
-   * @param searchKey    key to be searched
-   * @return data block
-   */
-  @Override public DataRefNode findLastDataBlock(DataRefNode dataRefBlock, IndexKey searchKey) {
-    // as its for btree type cast it to btree interface
-    BTreeNode rootNode = (BTreeNode) dataRefBlock;
-    while (!rootNode.isLeafNode()) {
-      rootNode = findLastLeafNode(searchKey, rootNode);
-    }
-    return rootNode;
-  }
-
-  /**
-   * Binary search used to get the first tentative block of the btree based on
-   * search key
-   *
-   * @param key  search key
-   * @param node root node of btree
-   * @return first tentative block
-   */
-  private BTreeNode findFirstLeafNode(IndexKey key, BTreeNode node) {
-    int childNodeIndex;
-    int low = 0;
-    int high = node.nodeSize() - 1;
-    int mid = 0;
-    int compareRes = -1;
-    IndexKey[] nodeKeys = node.getNodeKeys();
-    //
-    while (low <= high) {
-      mid = (low + high) >>> 1;
-      // compare the entries
-      compareRes = compareIndexes(key, nodeKeys[mid]);
-      if (compareRes < 0) {
-        high = mid - 1;
-      } else if (compareRes > 0) {
-        low = mid + 1;
-      } else {
-        // if key is matched then get the first entry
-        int currentPos = mid;
-        while (currentPos - 1 >= 0 && compareIndexes(key, nodeKeys[currentPos - 1]) == 0) {
-          currentPos--;
-        }
-        mid = currentPos;
-        break;
-      }
-    }
-    // if the compare result is less than zero and mid is more than 0,
-    // move to the previous block, as duplicate records can be present
-    if (compareRes < 0) {
-      if (mid > 0) {
-        mid--;
-      }
-      childNodeIndex = mid;
-    } else {
-      childNodeIndex = mid;
-    }
-    // get the leaf child
-    node = node.getChild(childNodeIndex);
-    return node;
-  }
-
-  /**
-   * Binary search used to get the last tentative block of the btree based on
-   * search key
-   *
-   * @param key  search key
-   * @param node root node of btree
-   * @return last tentative block
-   */
-  private BTreeNode findLastLeafNode(IndexKey key, BTreeNode node) {
-    int childNodeIndex;
-    int low = 0;
-    int high = node.nodeSize() - 1;
-    int mid = 0;
-    int compareRes = -1;
-    IndexKey[] nodeKeys = node.getNodeKeys();
-    //
-    while (low <= high) {
-      mid = (low + high) >>> 1;
-      // compare the entries
-      compareRes = compareIndexes(key, nodeKeys[mid]);
-      if (compareRes < 0) {
-        high = mid - 1;
-      } else if (compareRes > 0) {
-        low = mid + 1;
-      } else {
-        int currentPos = mid;
-        // if key is matched then get the last entry
-        while (currentPos + 1 < node.nodeSize()
-            && compareIndexes(key, nodeKeys[currentPos + 1]) == 0) {
-          currentPos++;
-        }
-        mid = currentPos;
-        break;
-      }
-    }
-    // if the compare result is less than zero and mid is more than 0,
-    // move to the previous block, as duplicate records can be present
-    if (compareRes < 0) {
-      if (mid > 0) {
-        mid--;
-      }
-      childNodeIndex = mid;
-    } else {
-      childNodeIndex = mid;
-    }
-    node = node.getChild(childNodeIndex);
-    return node;
-  }
-
-  /**
-   * Comparison of index keys. The dictionary key is a plain byte array; the
-   * no dictionary key has the format <Index of FirstKey (2 bytes)><Index of
-   * SecondKey (2 bytes)>...<Index of NKey (2 bytes)><First Key ByteArray><2nd
-   * Key ByteArray>...<N Key ByteArray>. In the column value size array, a no
-   * dictionary column is marked with -1. If a column's size is not -1, compare
-   * the byte arrays based on that size and advance the offset by the
-   * dictionary column size. If the size is -1 it is a no dictionary key, so
-   * the actual length is the difference between the current and the next key
-   * offset; if it is the last (or only) key, subtract from the total length
-   * instead
-   *
-   * @param first  key
-   * @param second key
-   * @return comparison value
-   */
-  private int compareIndexes(IndexKey first, IndexKey second) {
-    int dictionaryKeyOffset = 0;
-    int nonDictionaryKeyOffset = 0;
-    int compareResult = 0;
-    int processedNoDictionaryColumn = numberOfNoDictionaryColumns;
-    ByteBuffer firstNoDictionaryKeyBuffer = ByteBuffer.wrap(first.getNoDictionaryKeys());
-    ByteBuffer secondNoDictionaryKeyBuffer = ByteBuffer.wrap(second.getNoDictionaryKeys());
-    int actualOffset = 0;
-    int firstNoDcitionaryLength = 0;
-    int secondNodeDictionaryLength = 0;
-
-    for (int i = 0; i < eachColumnValueSize.length; i++) {
-
-      if (eachColumnValueSize[i] != NO_DCITIONARY_COLUMN_VALUE) {
-        compareResult = ByteUtil.UnsafeComparer.INSTANCE
-            .compareTo(first.getDictionaryKeys(), dictionaryKeyOffset, eachColumnValueSize[i],
-                second.getDictionaryKeys(), dictionaryKeyOffset, eachColumnValueSize[i]);
-        dictionaryKeyOffset += eachColumnValueSize[i];
-      } else {
-        if (processedNoDictionaryColumn > 1) {
-          actualOffset = firstNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset);
-          firstNoDcitionaryLength =
-              firstNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset + SHORT_SIZE_IN_BYTES);
-          secondNodeDictionaryLength =
-              secondNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset + SHORT_SIZE_IN_BYTES);
-          compareResult = ByteUtil.UnsafeComparer.INSTANCE
-              .compareTo(first.getNoDictionaryKeys(), actualOffset, firstNoDcitionaryLength,
-                  second.getNoDictionaryKeys(), actualOffset, secondNodeDictionaryLength);
-          nonDictionaryKeyOffset += SHORT_SIZE_IN_BYTES;
-          processedNoDictionaryColumn--;
-        } else {
-          actualOffset = firstNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset);
-          firstNoDcitionaryLength = first.getNoDictionaryKeys().length - actualOffset;
-          secondNodeDictionaryLength = second.getNoDictionaryKeys().length - actualOffset;
-          compareResult = ByteUtil.UnsafeComparer.INSTANCE
-              .compareTo(first.getNoDictionaryKeys(), actualOffset, firstNoDcitionaryLength,
-                  second.getNoDictionaryKeys(), actualOffset, secondNodeDictionaryLength);
-        }
-      }
-      if (compareResult != 0) {
-        return compareResult;
-      }
-    }
-
-    return 0;
-  }
-}
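
The two searches above are the classic binary-search-with-duplicates pattern; a self-contained analogue over a sorted int array (illustrative only, not from this commit) makes the walk to the first occurrence explicit:

    static int findFirst(int[] sorted, int key) {
      int low = 0;
      int high = sorted.length - 1;
      int mid = 0;
      int cmp = -1;
      while (low <= high) {
        mid = (low + high) >>> 1;
        cmp = Integer.compare(key, sorted[mid]);
        if (cmp < 0) {
          high = mid - 1;
        } else if (cmp > 0) {
          low = mid + 1;
        } else {
          // on a hit, walk left over duplicates to the first occurrence
          while (mid - 1 >= 0 && sorted[mid - 1] == key) {
            mid--;
          }
          break;
        }
      }
      // on a miss with cmp < 0, step back one slot, mirroring how the finder
      // revisits the previous child because duplicates may straddle nodes
      if (cmp < 0 && mid > 0) {
        mid--;
      }
      return mid;
    }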

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BTreeNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BTreeNode.java b/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BTreeNode.java
deleted file mode 100644
index 9468cb5..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BTreeNode.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.impl.btree;
-
-import org.carbondata.core.carbon.datastore.DataRefNode;
-import org.carbondata.core.carbon.datastore.IndexKey;
-
-/**
- * Interface for btree node
- */
-public interface BTreeNode extends DataRefNode {
-
-  /**
-   * below method will return the key entries of one node
-   *
-   * @return node entry array
-   */
-  IndexKey[] getNodeKeys();
-
-  /**
-   * to check whether node in a btree is a leaf node or not
-   *
-   * @return leaf node or not
-   */
-  boolean isLeafNode();
-
-  /**
-   * below method will be used to set the children of intermediate node
-   *
-   * @param children array
-   */
-  void setChildren(BTreeNode[] children);
-
-  /**
-   * below method will be used to set the next node
-   *
-   * @param nextNode
-   */
-  void setNextNode(BTreeNode nextNode);
-
-  /**
-   * Below method is to get the children based on index
-   *
-   * @param index children index
-   * @return btree node
-   */
-  BTreeNode getChild(int index);
-
-  /**
-   * below method to set the node entry
-   *
-   * @param key node entry
-   */
-  void setKey(IndexKey key);
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BTreeNonLeafNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BTreeNonLeafNode.java b/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BTreeNonLeafNode.java
deleted file mode 100644
index ad49c0b..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BTreeNonLeafNode.java
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.impl.btree;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.DataRefNode;
-import org.carbondata.core.carbon.datastore.IndexKey;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.datastorage.store.FileHolder;
-
-/**
- * Non leaf node of a b+tree which will keep the metadata (start key) of the
- * leaf nodes
- */
-public class BTreeNonLeafNode implements BTreeNode {
-
-  /**
-   * Child nodes
-   */
-  private BTreeNode[] children;
-
-  /**
-   * list of keys in non leaf
-   */
-  private List<IndexKey> listOfKeys;
-
-  public BTreeNonLeafNode() {
-    // creating a list which will store all the indexes
-    listOfKeys = new ArrayList<IndexKey>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-  }
-
-  /**
-   * below method will return the key entries of one node
-   *
-   * @return complete array of the node keys
-   */
-  @Override public IndexKey[] getNodeKeys() {
-    return listOfKeys.toArray(new IndexKey[listOfKeys.size()]);
-  }
-
-  /**
-   * as it is a non leaf node it will have the references of all the leaf
-   * nodes under it; this sets all the children
-   *
-   * @param children child nodes
-   */
-  @Override public void setChildren(BTreeNode[] children) {
-    this.children = children;
-  }
-
-  /**
-   * setting the next node
-   */
-  @Override public void setNextNode(BTreeNode nextNode) {
-    // not required in case of a non leaf node
-  }
-
-  /**
-   * get the child node based on the index
-   *
-   * @return child node
-   */
-  @Override public BTreeNode getChild(int index) {
-    return this.children[index];
-  }
-
-  /**
-   * add a key of a leaf node
-   *
-   * @param key leaf node start key
-   */
-  @Override public void setKey(IndexKey key) {
-    listOfKeys.add(key);
-
-  }
-
-  /**
-   * @return whether its a leaf node or not
-   */
-  @Override public boolean isLeafNode() {
-    return false;
-  }
-
-  /**
-   * Method to get the next block; this can be used while scanning, when an
-   * iterator of this class is used to iterate over blocks
-   *
-   * @return next block
-   */
-  @Override public DataRefNode getNextDataRefNode() {
-    throw new UnsupportedOperationException("Unsupported operation");
-  }
-
-  /**
-   * to get the number of key tuples present in the block
-   *
-   * @return number of keys in the block
-   */
-  @Override public int nodeSize() {
-    return listOfKeys.size();
-  }
-
-  /**
-   * Method can be used to get the block index. This is useful when multiple
-   * threads scan a group of blocks, so that some of the blocks can be
-   * assigned to one thread and some to another
-   *
-   * @return block number
-   */
-  @Override public long nodeNumber() {
-    throw new UnsupportedOperationException("Unsupported operation");
-  }
-
-  /**
-   * This method will be used to get the max value of all the columns this can
-   * be used in case of filter query
-   *
-   * @return max value of all the columns
-   */
-  @Override public byte[][] getColumnsMaxValue() {
-    // operation of getting the max value is not supported as it is a non leaf
-    // node; in case of B+Tree, data is stored only in the leaf nodes and the
-    // intermediate nodes are used only for searching the leaf node
-    throw new UnsupportedOperationException("Unsupported operation");
-  }
-
-  /**
-   * This method will be used to get the min value of all the columns this can
-   * be used in case of filter query
-   *
-   * @return min value of all the columns
-   */
-  @Override public byte[][] getColumnsMinValue() {
-    // operation of getting the min value is not supported as it is a non leaf
-    // node; in case of B+Tree, data is stored only in the leaf nodes and the
-    // intermediate nodes are used only for searching the leaf node
-    throw new UnsupportedOperationException("Unsupported operation");
-  }
-
-  /**
-   * Below method will be used to get the dimension chunks
-   *
-   * @param fileReader   file reader to read the chunks from file
-   * @param blockIndexes indexes of the blocks need to be read
-   * @return dimension data chunks
-   */
-  @Override public DimensionColumnDataChunk[] getDimensionChunks(FileHolder fileReader,
-      int[] blockIndexes) {
-
-    // operation of getting the dimension chunks is not supported as it is a
-    // non leaf node; in case of B+Tree, data is stored only in the leaf nodes
-    // and the intermediate nodes are used only for searching the leaf node
-    throw new UnsupportedOperationException("Unsupported operation");
-  }
-
-  /**
-   * Below method will be used to get the dimension chunk
-   *
-   * @param fileReader file reader to read the chunk from file
-   * @param blockIndex block index to be read
-   * @return dimension data chunk
-   */
-  @Override public DimensionColumnDataChunk getDimensionChunk(FileHolder fileReader,
-      int blockIndexes) {
-    // operation of getting the dimension chunk is not supported as it is a
-    // non leaf node; in case of B+Tree, data is stored only in the leaf nodes
-    // and the intermediate nodes are used only for searching the leaf node
-    throw new UnsupportedOperationException("Unsupported operation");
-  }
-
-  /**
-   * Below method will be used to get the measure chunk
-   *
-   * @param fileReader   file reader to read the chunk from file
-   * @param blockIndexes block indexes to be read from file
-   * @return measure column data chunk
-   */
-  @Override public MeasureColumnDataChunk[] getMeasureChunks(FileHolder fileReader,
-      int[] blockIndexes) {
-    // operation of getting the measure chunks is not supported as it is a
-    // non leaf node; in case of B+Tree, data is stored only in the leaf nodes
-    // and the intermediate nodes are used only for searching the leaf node
-    throw new UnsupportedOperationException("Unsupported operation");
-  }
-
-  /**
-   * Below method will be used to read the measure chunk
-   *
-   * @param fileReader file reader to read the file chunk
-   * @param blockIndex block index to be read from file
-   * @return measure data chunk
-   */
-
-  @Override public MeasureColumnDataChunk getMeasureChunk(FileHolder fileReader, int blockIndex) {
-    // operation of getting the measure chunk is not supported as it is a
-    // non leaf node; in case of B+Tree, data is stored only in the leaf nodes
-    // and the intermediate nodes are used only for searching the leaf node
-    throw new UnsupportedOperationException("Unsupported operation");
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockBTreeBuilder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockBTreeBuilder.java b/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockBTreeBuilder.java
deleted file mode 100644
index 7ff3929..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockBTreeBuilder.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.impl.btree;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.datastore.BTreeBuilderInfo;
-import org.carbondata.core.carbon.datastore.IndexKey;
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-/**
- * Below class will be used to build the btree; the BTree will be built over
- * all the blocks of a segment
- */
-public class BlockBTreeBuilder extends AbstractBTreeBuilder {
-
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(BlockBTreeBuilder.class.getName());
-
-  /**
-   * Below method will be used to build the segment info in b+tree format.
-   * The tree is read only and is built bottom up: first all the leaf nodes
-   * are built and then the intermediate nodes. In our case one leaf node
-   * will not have only one entry, it will have a group of entries
-   */
-  @Override public void build(BTreeBuilderInfo btreeBuilderInfo) {
-    int groupCounter;
-    int nInternal = 0;
-    BTreeNode curNode = null;
-    BTreeNode prevNode = null;
-    List<BTreeNode[]> nodeGroups =
-        new ArrayList<BTreeNode[]>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-    BTreeNode[] currentGroup = null;
-    List<List<IndexKey>> interNSKeyList =
-        new ArrayList<List<IndexKey>>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-    List<IndexKey> leafNSKeyList = null;
-    long nodeNumber = 0;
-    for (int metadataIndex = 0;
-         metadataIndex < btreeBuilderInfo.getFooterList().size(); metadataIndex++) {
-      // creating a leaf node
-      curNode = new BlockBTreeLeafNode(btreeBuilderInfo, metadataIndex, nodeNumber++);
-      nLeaf++;
-      // setting the next node: as it is a b+tree, all the leaf nodes
-      // are chained together in a linked list
-      if (prevNode != null) {
-        prevNode.setNextNode(curNode);
-      }
-      prevNode = curNode;
-      // as an intermediate node will have more than one leaf,
-      // create the leaves in groups
-      groupCounter = (nLeaf - 1) % (maxNumberOfEntriesInNonLeafNodes);
-      if (groupCounter == 0) {
-        // Create new node group if current group is full
-        leafNSKeyList = new ArrayList<IndexKey>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-        currentGroup = new BTreeNode[maxNumberOfEntriesInNonLeafNodes];
-        nodeGroups.add(currentGroup);
-        nInternal++;
-        interNSKeyList.add(leafNSKeyList);
-      }
-      if (null != leafNSKeyList) {
-        leafNSKeyList.add(convertStartKeyToNodeEntry(
-            btreeBuilderInfo.getFooterList().get(metadataIndex).getBlockletIndex()
-                .getBtreeIndex().getStartKey()));
-      }
-      if (null != currentGroup) {
-        currentGroup[groupCounter] = curNode;
-      }
-    }
-    if (nLeaf == 0) {
-      return;
-    }
-    // adding an intermediate node
-    addIntermediateNode(curNode, nodeGroups, currentGroup, interNSKeyList, nInternal);
-    LOGGER.info("************************Total Number Rows In BTREE: " + nLeaf);
-  }
-}
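
A hedged sketch of how the driver side would wire this builder up (the BTreeBuilderInfo constructor arguments are assumed from the fields this class reads, not confirmed by this diff):

    // footerList: List<DataFileFooter> for the segment's blocks;
    // dimensionColumnValueSize: int[] of per-column key sizes
    BlockBTreeBuilder builder = new BlockBTreeBuilder();
    builder.build(new BTreeBuilderInfo(footerList, dimensionColumnValueSize));
    BTreeNode root = builder.get();
    // root can now be handed to a DataRefNodeFinder for block pruning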

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockBTreeLeafNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockBTreeLeafNode.java b/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockBTreeLeafNode.java
deleted file mode 100644
index 6b63961..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockBTreeLeafNode.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.impl.btree;
-
-import org.carbondata.core.carbon.datastore.BTreeBuilderInfo;
-import org.carbondata.core.carbon.datastore.block.TableBlockInfo;
-import org.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
-import org.carbondata.core.carbon.metadata.blocklet.index.BlockletMinMaxIndex;
-
-/**
- * Leaf node for a btree where only min and max values are stored; this can be
- * used from the driver when we only need to find whether a particular block
- * should be selected for query execution
- */
-public class BlockBTreeLeafNode extends AbstractBTreeLeafNode {
-
-  private TableBlockInfo blockInfo;
-
-  /**
-   * Create a leaf node
-   *
-   * @param builderInfos  builder infos which have required metadata to create a leaf
-   *                      node
-   * @param metadataIndex metadata index
-   * @param nodeNumber    node number of the leaf
-   */
-  public BlockBTreeLeafNode(BTreeBuilderInfo builderInfos, int metadataIndex, long nodeNumber) {
-    DataFileFooter footer = builderInfos.getFooterList().get(metadataIndex);
-    BlockletMinMaxIndex minMaxIndex = footer.getBlockletIndex().getMinMaxIndex();
-    maxKeyOfColumns = minMaxIndex.getMaxValues();
-    minKeyOfColumns = minMaxIndex.getMinValues();
-    numberOfKeys = 1;
-    this.nodeNumber = nodeNumber;
-    this.blockInfo = footer.getTableBlockInfo();
-  }
-
-  /**
-   * Below method is to get the table block info
-   * This will be used only in case of a BlockBtree leaf node, which is
-   * used from the driver
-   *
-   * @return TableBlockInfo
-   */
-  public TableBlockInfo getTableBlockInfo() {
-    return blockInfo;
-  }
-
-}
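
To illustrate the driver-side pruning these nodes enable (a sketch; it assumes a two-array compareTo overload on ByteUtil.UnsafeComparer alongside the offset-based one used elsewhere in this commit):

    // true if the block's [min, max] range for the column can contain the value
    static boolean blockMightMatch(BlockBTreeLeafNode leaf, int col, byte[] filterValue) {
      byte[] min = leaf.getColumnsMinValue()[col];
      byte[] max = leaf.getColumnsMaxValue()[col];
      return ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValue, min) >= 0
          && ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValue, max) <= 0;
    }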

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeBuilder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeBuilder.java b/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeBuilder.java
deleted file mode 100644
index 0cc62f7..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeBuilder.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.impl.btree;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.datastore.BTreeBuilderInfo;
-import org.carbondata.core.carbon.datastore.IndexKey;
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-/**
- * Btree based builder which will build the leaf node in a b+ tree format
- */
-public class BlockletBTreeBuilder extends AbstractBTreeBuilder {
-
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(BlockletBTreeBuilder.class.getName());
-
-  /**
-   * Below method will be used to build the segment info in b+tree format.
-   * The tree is read only and is built bottom up: first all the leaf nodes
-   * are built and then the intermediate nodes. In our case one leaf node
-   * will not have only one entry, it will have a group of entries
-   */
-  @Override public void build(BTreeBuilderInfo segmentBuilderInfos) {
-    long totalNumberOfTuple = 0;
-    int groupCounter;
-    int nInternal = 0;
-    BTreeNode curNode = null;
-    BTreeNode prevNode = null;
-    List<BTreeNode[]> nodeGroups =
-        new ArrayList<BTreeNode[]>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    BTreeNode[] currentGroup = null;
-    List<List<IndexKey>> interNSKeyList =
-        new ArrayList<List<IndexKey>>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    List<IndexKey> leafNSKeyList = null;
-    long nodeNumber = 0;
-    for (int index = 0;
-         index < segmentBuilderInfos.getFooterList().get(0).getBlockletList()
-             .size(); index++) {
-      // creating a leaf node
-      curNode = new BlockletBTreeLeafNode(segmentBuilderInfos, index, nodeNumber++);
-      totalNumberOfTuple +=
-          segmentBuilderInfos.getFooterList().get(0).getBlockletList().get(index)
-              .getNumberOfRows();
-      nLeaf++;
-      // setting the next node: as it is a b+tree, all the leaf nodes
-      // are chained together in a linked list
-      if (prevNode != null) {
-        prevNode.setNextNode(curNode);
-      }
-      prevNode = curNode;
-      // as an intermediate node will have more than one leaf,
-      // create the leaves in groups
-      groupCounter = (nLeaf - 1) % (maxNumberOfEntriesInNonLeafNodes);
-      if (groupCounter == 0) {
-        // Create new node group if current group is full
-        leafNSKeyList = new ArrayList<IndexKey>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-        currentGroup = new BTreeNode[maxNumberOfEntriesInNonLeafNodes];
-        nodeGroups.add(currentGroup);
-        nInternal++;
-        interNSKeyList.add(leafNSKeyList);
-      }
-      if (null != leafNSKeyList) {
-        leafNSKeyList.add(convertStartKeyToNodeEntry(
-            segmentBuilderInfos.getFooterList().get(0).getBlockletList().get(index)
-                .getBlockletIndex().getBtreeIndex().getStartKey()));
-      }
-      if (null != currentGroup) {
-        currentGroup[groupCounter] = curNode;
-      }
-    }
-    if (totalNumberOfTuple == 0) {
-      return;
-    }
-    // adding an intermediate node
-    addIntermediateNode(curNode, nodeGroups, currentGroup, interNSKeyList, nInternal);
-    LOGGER.info("****************************Total Number Rows In BTREE: " + totalNumberOfTuple);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeLeafNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeLeafNode.java b/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeLeafNode.java
deleted file mode 100644
index 12cadf4..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeLeafNode.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.datastore.impl.btree;
-
-import org.carbondata.core.carbon.datastore.BTreeBuilderInfo;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.reader.DimensionColumnChunkReader;
-import org.carbondata.core.carbon.datastore.chunk.reader.MeasureColumnChunkReader;
-import org.carbondata.core.carbon.datastore.chunk.reader.dimension.CompressedDimensionChunkFileBasedReader;
-import org.carbondata.core.carbon.datastore.chunk.reader.measure.CompressedMeasureChunkFileBasedReader;
-import org.carbondata.core.carbon.metadata.blocklet.index.BlockletMinMaxIndex;
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.compression.ValueCompressionModel;
-import org.carbondata.core.util.CarbonUtil;
-
-/**
- * Leaf node class of a Blocklet btree
- */
-public class BlockletBTreeLeafNode extends AbstractBTreeLeafNode {
-
-  /**
-   * reader for dimension chunk
-   */
-  private DimensionColumnChunkReader dimensionChunksReader;
-
-  /**
-   * reader of measure chunk
-   */
-  private MeasureColumnChunkReader measureColumnChunkReader;
-
-  /**
-   * Create a leaf node
-   *
-   * @param builderInfos builder infos which have required metadata to create a leaf node
-   * @param leafIndex    leaf node index
-   * @param nodeNumber   node number of the node
-   *                     this will be used during query execution to distribute
-   *                     the leaf nodes of a btree across executors
-   */
-  public BlockletBTreeLeafNode(BTreeBuilderInfo builderInfos, int leafIndex, long nodeNumber) {
-    // get the leaf node min max
-    BlockletMinMaxIndex minMaxIndex =
-        builderInfos.getFooterList().get(0).getBlockletList().get(leafIndex)
-            .getBlockletIndex().getMinMaxIndex();
-    // max key of the columns
-    maxKeyOfColumns = minMaxIndex.getMaxValues();
-    // min keys of the columns
-    minKeyOfColumns = minMaxIndex.getMinValues();
-    // number of keys present in the leaf
-    numberOfKeys = builderInfos.getFooterList().get(0).getBlockletList().get(leafIndex)
-        .getNumberOfRows();
-    // create an instance of the dimension chunk reader
-    dimensionChunksReader = new CompressedDimensionChunkFileBasedReader(
-        builderInfos.getFooterList().get(0).getBlockletList().get(leafIndex)
-            .getDimensionColumnChunk(), builderInfos.getDimensionColumnValueSize(),
-        builderInfos.getFooterList().get(0).getTableBlockInfo().getFilePath());
-    // get the value compression model which was used to compress the measure values
-    ValueCompressionModel valueCompressionModel = CarbonUtil.getValueCompressionModel(
-        builderInfos.getFooterList().get(0).getBlockletList().get(leafIndex)
-            .getMeasureColumnChunk());
-    // create an instance of the measure column chunk reader
-    measureColumnChunkReader = new CompressedMeasureChunkFileBasedReader(
-        builderInfos.getFooterList().get(0).getBlockletList().get(leafIndex)
-            .getMeasureColumnChunk(), valueCompressionModel,
-            builderInfos.getFooterList().get(0).getTableBlockInfo().getFilePath());
-    this.nodeNumber = nodeNumber;
-  }
-
-  /**
-   * Below method will be used to get the dimension chunks
-   *
-   * @param fileReader   file reader to read the chunks from file
-   * @param blockIndexes indexes of the blocks need to be read
-   * @return dimension data chunks
-   */
-  @Override public DimensionColumnDataChunk[] getDimensionChunks(FileHolder fileReader,
-      int[] blockIndexes) {
-    return dimensionChunksReader.readDimensionChunks(fileReader, blockIndexes);
-  }
-
-  /**
-   * Below method will be used to get the dimension chunk
-   *
-   * @param fileReader file reader to read the chunk from file
-   * @param blockIndex block index to be read
-   * @return dimension data chunk
-   */
-  @Override public DimensionColumnDataChunk getDimensionChunk(FileHolder fileReader,
-      int blockIndex) {
-    return dimensionChunksReader.readDimensionChunk(fileReader, blockIndex);
-  }
-
-  /**
-   * Below method will be used to get the measure chunk
-   *
-   * @param fileReader   file reader to read the chunk from file
-   * @param blockIndexes block indexes to be read from file
-   * @return measure column data chunk
-   */
-  @Override public MeasureColumnDataChunk[] getMeasureChunks(FileHolder fileReader,
-      int[] blockIndexes) {
-    return measureColumnChunkReader.readMeasureChunks(fileReader, blockIndexes);
-  }
-
-  /**
-   * Below method will be used to read the measure chunk
-   *
-   * @param fileReader file read to read the file chunk
-   * @param blockIndex block index to be read from file
-   * @return measure data chunk
-   */
-  @Override public MeasureColumnDataChunk getMeasureChunk(FileHolder fileReader, int blockIndex) {
-    return measureColumnChunkReader.readMeasureChunk(fileReader, blockIndex);
-  }
-}

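A BlockletBTreeLeafNode wires one blocklet's footer metadata to compressed chunk readers; the actual I/O happens only when a query asks for chunks. A hedged usage sketch, not part of the commit: the FileHolder calls are assumed from the surrounding datastorage API, and filePath/builderInfos are placeholders for values prepared by the btree builder.

// Sketch only: read two dimension chunks and one measure chunk from a leaf.
FileHolder fileReader = FileFactory.getFileHolder(FileFactory.getFileType(filePath));
BlockletBTreeLeafNode leaf = new BlockletBTreeLeafNode(builderInfos, 0, 0L);
DimensionColumnDataChunk[] dims = leaf.getDimensionChunks(fileReader, new int[] { 0, 1 });
MeasureColumnDataChunk measure = leaf.getMeasureChunk(fileReader, 0);
fileReader.finish(); // release open file handles
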
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/CarbonMetadata.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/CarbonMetadata.java b/core/src/main/java/org/carbondata/core/carbon/metadata/CarbonMetadata.java
deleted file mode 100644
index 11f159b..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/CarbonMetadata.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata;
-
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.carbondata.core.carbon.metadata.schema.table.CarbonTable;
-import org.carbondata.core.carbon.metadata.schema.table.TableInfo;
-import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
-
-/**
- * Class which persists the information about the tables present in the carbon schemas
- */
-public final class CarbonMetadata {
-
-  /**
-   * meta data instance
-   */
-  private static final CarbonMetadata CARBONMETADATAINSTANCE = new CarbonMetadata();
-
-  /**
-   * holds the list of tableInfo currently present
-   */
-  private Map<String, CarbonTable> tableInfoMap;
-
-  private CarbonMetadata() {
-    // creating a concurrent map as it will be updated by multiple threads
-    tableInfoMap = new ConcurrentHashMap<String, CarbonTable>();
-  }
-
-  public static CarbonMetadata getInstance() {
-    return CARBONMETADATAINSTANCE;
-  }
-
-  /**
-   * removes the table information
-   *
-   * @param tableUniqueName unique name of the table to remove
-   */
-  public void removeTable(String tableUniqueName) {
-    tableInfoMap.remove(convertToLowerCase(tableUniqueName));
-  }
-
-  /**
-   * Below method will be used to set the carbon table
-   * This method is intended for the executor side: the driver always holds
-   * the updated table, so during query execution and data loading the
-   * executor just needs to add the table sent from the driver
-   *
-   * @param carbonTable
-   */
-  public void addCarbonTable(CarbonTable carbonTable) {
-    tableInfoMap.put(convertToLowerCase(carbonTable.getTableUniqueName()), carbonTable);
-  }
-
-  /**
-   * method to load the table metadata
-   *
-   * @param tableInfo
-   */
-  public void loadTableMetadata(TableInfo tableInfo) {
-    CarbonTable carbonTable = tableInfoMap.get(convertToLowerCase(tableInfo.getTableUniqueName()));
-    if (null == carbonTable || carbonTable.getTableLastUpdatedTime() < tableInfo
-        .getLastUpdatedTime()) {
-      carbonTable = new CarbonTable();
-      carbonTable.loadCarbonTable(tableInfo);
-      tableInfoMap.put(convertToLowerCase(tableInfo.getTableUniqueName()), carbonTable);
-    }
-  }
-
-  /**
-   * Below method will be used to get the loaded carbon table
-   *
-   * @param tableUniqueName
-   * @return the carbon table mapped to the given name, or null if absent
-   */
-  public CarbonTable getCarbonTable(String tableUniqueName) {
-    return tableInfoMap.get(convertToLowerCase(tableUniqueName));
-  }
-
-  /**
-   * @return the number of tables present in the schema
-   */
-  public int getNumberOfTables() {
-    return tableInfoMap.size();
-  }
-
-  /**
-   * returns the given string in lowercase
-   * @param table
-   * @return
-   */
-  public String convertToLowerCase(String table) {
-    return table.toLowerCase();
-  }
-
-  /**
-   * method will return dimension instance based on the column identifier
-   * and table instance passed to it.
-   *
-   * @param carbonTable
-   * @param columnIdentifier
-   * @return CarbonDimension instance
-   */
-  public CarbonDimension getCarbonDimensionBasedOnColIdentifier(CarbonTable carbonTable,
-      String columnIdentifier) {
-    List<CarbonDimension> listOfCarbonDims =
-        carbonTable.getDimensionByTableName(carbonTable.getFactTableName());
-    for (CarbonDimension dimension : listOfCarbonDims) {
-      if (dimension.getColumnId().equals(columnIdentifier)) {
-        return dimension;
-      }
-      if (dimension.numberOfChild() > 0) {
-        CarbonDimension childDim =
-            getCarbonChildDimsBasedOnColIdentifier(columnIdentifier, dimension);
-        if (null != childDim) {
-          return childDim;
-        }
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Below method will be used to get the dimension based on column identifier
-   * for complex dimension children
-   *
-   * @param columnIdentifier column identifier
-   * @param dimension        parent dimension
-   * @return children dimension
-   */
-  private CarbonDimension getCarbonChildDimsBasedOnColIdentifier(String columnIdentifier,
-      CarbonDimension dimension) {
-    for (int i = 0; i < dimension.numberOfChild(); i++) {
-      if (dimension.getListOfChildDimensions().get(i).getColumnId().equals(columnIdentifier)) {
-        return dimension.getListOfChildDimensions().get(i);
-      } else if (dimension.getListOfChildDimensions().get(i).numberOfChild() > 0) {
-        CarbonDimension childDim = getCarbonChildDimsBasedOnColIdentifier(columnIdentifier,
-            dimension.getListOfChildDimensions().get(i));
-        if (null != childDim) {
-          return childDim;
-        }
-      }
-    }
-    return null;
-  }
-}

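Since CarbonMetadata keys its cache by the lower-cased table unique name, and loadTableMetadata only swaps a table in when the incoming TableInfo carries a newer lastUpdatedTime, a typical lookup is a two-step affair. A minimal sketch, not part of the commit, assuming tableInfo was produced by the schema reader:

CarbonMetadata metadata = CarbonMetadata.getInstance();
metadata.loadTableMetadata(tableInfo); // no-op if the cached copy is newer
CarbonTable table = metadata.getCarbonTable(tableInfo.getTableUniqueName());
if (table != null) {
  System.out.println("tables currently cached: " + metadata.getNumberOfTables());
}
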
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/BlockletInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/BlockletInfo.java b/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/BlockletInfo.java
deleted file mode 100644
index de998a9..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/BlockletInfo.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.carbon.metadata.blocklet;
-
-import java.io.Serializable;
-import java.util.List;
-
-import org.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
-import org.carbondata.core.carbon.metadata.blocklet.index.BlockletIndex;
-
-/**
- * class to store the information about the blocklet
- */
-public class BlockletInfo implements Serializable {
-
-  /**
-   * serialization id
-   */
-  private static final long serialVersionUID = 1873135459695635381L;
-
-  /**
-   * Number of rows in this blocklet
-   */
-  private int numberOfRows;
-
-  /**
-   * Information about dimension chunk of all dimensions in this blocklet
-   */
-  private List<DataChunk> dimensionColumnChunk;
-
-  /**
-   * Information about measure chunk of all measures in this blocklet
-   */
-  private List<DataChunk> measureColumnChunk;
-
-  /**
-   * stores the index (min/max values and start/end keys) of each column of the blocklet
-   */
-  private BlockletIndex blockletIndex;
-
-  /**
-   * @return the numberOfRows
-   */
-  public int getNumberOfRows() {
-    return numberOfRows;
-  }
-
-  /**
-   * @param numberOfRows the numberOfRows to set
-   */
-  public void setNumberOfRows(int numberOfRows) {
-    this.numberOfRows = numberOfRows;
-  }
-
-  /**
-   * @return the dimensionColumnChunk
-   */
-  public List<DataChunk> getDimensionColumnChunk() {
-    return dimensionColumnChunk;
-  }
-
-  /**
-   * @param dimensionColumnChunk the dimensionColumnChunk to set
-   */
-  public void setDimensionColumnChunk(List<DataChunk> dimensionColumnChunk) {
-    this.dimensionColumnChunk = dimensionColumnChunk;
-  }
-
-  /**
-   * @return the measureColumnChunk
-   */
-  public List<DataChunk> getMeasureColumnChunk() {
-    return measureColumnChunk;
-  }
-
-  /**
-   * @param measureColumnChunk the measureColumnChunk to set
-   */
-  public void setMeasureColumnChunk(List<DataChunk> measureColumnChunk) {
-    this.measureColumnChunk = measureColumnChunk;
-  }
-
-  /**
-   * @return the blockletIndex
-   */
-  public BlockletIndex getBlockletIndex() {
-    return blockletIndex;
-  }
-
-  /**
-   * @param blockletIndex the blockletIndex to set
-   */
-  public void setBlockletIndex(BlockletIndex blockletIndex) {
-    this.blockletIndex = blockletIndex;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/DataFileFooter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/DataFileFooter.java b/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/DataFileFooter.java
deleted file mode 100644
index 94d8e8b..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/DataFileFooter.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.blocklet;
-
-import java.io.Serializable;
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.block.TableBlockInfo;
-import org.carbondata.core.carbon.metadata.blocklet.index.BlockletIndex;
-import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
-
-/**
- * Information of one data file
- */
-public class DataFileFooter implements Serializable {
-
-  /**
-   * serialization id
-   */
-  private static final long serialVersionUID = -7284319972734500751L;
-
-  /**
-   * version used for data compatibility
-   */
-  private int versionId;
-
-  /**
-   * total number of rows in this file
-   */
-  private long numberOfRows;
-
-  /**
-   * Segment info (will be same/repeated for all blocks in this segment)
-   */
-  private SegmentInfo segmentInfo;
-
-  /**
-   * Information about leaf nodes of all columns in this file
-   */
-  private List<BlockletInfo> blockletList;
-
-  /**
-   * blocklet index of all blocklets in this file
-   */
-  private BlockletIndex blockletIndex;
-
-  /**
-   * Description of columns in this file
-   */
-  private List<ColumnSchema> columnInTable;
-
-  /**
-   * to store block info details like file name, block index and locations
-   */
-  private TableBlockInfo tableBlockInfo;
-
-  /**
-   * @return the versionId
-   */
-  public int getVersionId() {
-    return versionId;
-  }
-
-  /**
-   * @param versionId the versionId to set
-   */
-  public void setVersionId(int versionId) {
-    this.versionId = versionId;
-  }
-
-  /**
-   * @return the numberOfRows
-   */
-  public long getNumberOfRows() {
-    return numberOfRows;
-  }
-
-  /**
-   * @param numberOfRows the numberOfRows to set
-   */
-  public void setNumberOfRows(long numberOfRows) {
-    this.numberOfRows = numberOfRows;
-  }
-
-  /**
-   * @return the segmentInfo
-   */
-  public SegmentInfo getSegmentInfo() {
-    return segmentInfo;
-  }
-
-  /**
-   * @param segmentInfo the segmentInfo to set
-   */
-  public void setSegmentInfo(SegmentInfo segmentInfo) {
-    this.segmentInfo = segmentInfo;
-  }
-
-  /**
-   * @return the List of Blocklet
-   */
-  public List<BlockletInfo> getBlockletList() {
-    return blockletList;
-  }
-
-  /**
-   * @param blockletList the blockletList to set
-   */
-  public void setBlockletList(List<BlockletInfo> blockletList) {
-    this.blockletList = blockletList;
-  }
-
-  /**
-   * @return the blockletIndex
-   */
-  public BlockletIndex getBlockletIndex() {
-    return blockletIndex;
-  }
-
-  /**
-   * @param blockletIndex the blockletIndex to set
-   */
-  public void setBlockletIndex(BlockletIndex blockletIndex) {
-    this.blockletIndex = blockletIndex;
-  }
-
-  /**
-   * @return the columnInTable
-   */
-  public List<ColumnSchema> getColumnInTable() {
-    return columnInTable;
-  }
-
-  /**
-   * @param columnInTable the columnInTable to set
-   */
-  public void setColumnInTable(List<ColumnSchema> columnInTable) {
-    this.columnInTable = columnInTable;
-  }
-
-  /**
-   * @return the tableBlockInfo
-   */
-  public TableBlockInfo getTableBlockInfo() {
-    return tableBlockInfo;
-  }
-
-  /**
-   * @param tableBlockInfo the tableBlockInfo to set
-   */
-  public void setTableBlockInfo(TableBlockInfo tableBlockInfo) {
-    this.tableBlockInfo = tableBlockInfo;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/SegmentInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/SegmentInfo.java b/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/SegmentInfo.java
deleted file mode 100644
index a69e061..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/SegmentInfo.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.core.carbon.metadata.blocklet;
-
-import java.io.Serializable;
-
-/**
- * Class which holds the segment information
- */
-public class SegmentInfo implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = -1749874611112709431L;
-
-  /**
-   * number of column in the segment
-   */
-  private int numberOfColumns;
-
-  /**
-   * cardinality of each column;
-   * for a column not participating in the multidimensional key the cardinality will be -1
-   */
-  private int[] columnCardinality;
-
-  /**
-   * @return the numberOfColumns
-   */
-  public int getNumberOfColumns() {
-    return numberOfColumns;
-  }
-
-  /**
-   * @param numberOfColumns the numberOfColumns to set
-   */
-  public void setNumberOfColumns(int numberOfColumns) {
-    this.numberOfColumns = numberOfColumns;
-  }
-
-  /**
-   * @return the columnCardinality
-   */
-  public int[] getColumnCardinality() {
-    return columnCardinality;
-  }
-
-  /**
-   * @param columnCardinality the columnCardinality to set
-   */
-  public void setColumnCardinality(int[] columnCardinality) {
-    this.columnCardinality = columnCardinality;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/compressor/ChunkCompressorMeta.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/compressor/ChunkCompressorMeta.java b/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/compressor/ChunkCompressorMeta.java
deleted file mode 100644
index bcfd76e..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/compressor/ChunkCompressorMeta.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.carbon.metadata.blocklet.compressor;
-
-import java.io.Serializable;
-
-/**
- * Represents the compression information of data of one dimension
- * or one dimension group in one blocklet
- */
-public class ChunkCompressorMeta implements Serializable {
-
-  /**
-   * serialization version
-   */
-  private static final long serialVersionUID = -6697087170420991140L;
-
-  /**
-   * data chunk compressor
-   */
-  private CompressionCodec compressor;
-
-  /**
-   * total byte size of all uncompressed pages in this column chunk (including the headers)
-   */
-  private long uncompressedSize;
-
-  /**
-   * total byte size of all compressed pages in this column chunk (including the headers)
-   */
-  private long compressedSize;
-
-  /**
-   * @return the compressor
-   */
-  public CompressionCodec getCompressorCodec() {
-    return compressor;
-  }
-
-  /**
-   * @param compressor the compressor to set
-   */
-  public void setCompressor(CompressionCodec compressor) {
-    this.compressor = compressor;
-  }
-
-  /**
-   * @return the uncompressedSize
-   */
-  public long getUncompressedSize() {
-    return uncompressedSize;
-  }
-
-  /**
-   * @param uncompressedSize the uncompressedSize to set
-   */
-  public void setUncompressedSize(long uncompressedSize) {
-    this.uncompressedSize = uncompressedSize;
-  }
-
-  /**
-   * @return the compressedSize
-   */
-  public long getCompressedSize() {
-    return compressedSize;
-  }
-
-  /**
-   * @param compressedSize the compressedSize to set
-   */
-  public void setCompressedSize(long compressedSize) {
-    this.compressedSize = compressedSize;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/compressor/CompressionCodec.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/compressor/CompressionCodec.java b/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/compressor/CompressionCodec.java
deleted file mode 100644
index 76d2ddb..0000000
--- a/core/src/main/java/org/carbondata/core/carbon/metadata/blocklet/compressor/CompressionCodec.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.carbon.metadata.blocklet.compressor;
-
-/**
- * Compressions supported by Carbon Data.
- */
-public enum CompressionCodec {
-
-  /**
-   * snappy compression
-   */
-  SNAPPY,
-}



[34/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/LoadStatistics.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/LoadStatistics.java b/core/src/main/java/org/apache/carbondata/core/util/LoadStatistics.java
new file mode 100644
index 0000000..9fb1e6e
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/LoadStatistics.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.util;
+
+public interface LoadStatistics {
+  //Init PartitionInfo
+  void initPartitonInfo(String partitionId);
+
+  //Record the time
+  void recordDicShuffleAndWriteTime();
+
+  void recordLoadCsvfilesToDfTime();
+
+  void recordDictionaryValuesTotalTime(String partitionID,
+      Long dictionaryValuesTotalTimePoint);
+
+  void recordCsvInputStepTime(String partitionID,
+      Long csvInputStepTimePoint);
+
+  void recordLruCacheLoadTime(double lruCacheLoadTime);
+
+  void recordGeneratingDictionaryValuesTime(String partitionID,
+      Long generatingDictionaryValuesTimePoint);
+
+  void recordSortRowsStepTotalTime(String partitionID,
+      Long sortRowsStepTotalTimePoint);
+
+  void recordMdkGenerateTotalTime(String partitionID,
+      Long mdkGenerateTotalTimePoint);
+
+  void recordDictionaryValue2MdkAdd2FileTime(String partitionID,
+      Long dictionaryValue2MdkAdd2FileTimePoint);
+
+  //Record the node blocks information map
+  void recordHostBlockMap(String host, Integer numBlocks);
+
+  //Record the partition blocks information map
+  void recordPartitionBlockMap(String partitionID, Integer numBlocks);
+
+  //Record total num of records processed
+  void recordTotalRecords(long totalRecords);
+
+  //Print the statistics information
+  void printStatisticsInfo(String partitionID);
+
+}

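Every hook on LoadStatistics is bookkeeping only, so a do-nothing implementation is a safe default when statistics collection is switched off. A minimal sketch follows; the class name is the editor's invention and not part of this commit.

// Editor's sketch: a no-op LoadStatistics.
public final class NoOpLoadStatistics implements LoadStatistics {
  @Override public void initPartitonInfo(String partitionId) { }
  @Override public void recordDicShuffleAndWriteTime() { }
  @Override public void recordLoadCsvfilesToDfTime() { }
  @Override public void recordDictionaryValuesTotalTime(String partitionID, Long timePoint) { }
  @Override public void recordCsvInputStepTime(String partitionID, Long timePoint) { }
  @Override public void recordLruCacheLoadTime(double lruCacheLoadTime) { }
  @Override public void recordGeneratingDictionaryValuesTime(String partitionID, Long timePoint) { }
  @Override public void recordSortRowsStepTotalTime(String partitionID, Long timePoint) { }
  @Override public void recordMdkGenerateTotalTime(String partitionID, Long timePoint) { }
  @Override public void recordDictionaryValue2MdkAdd2FileTime(String partitionID, Long timePoint) { }
  @Override public void recordHostBlockMap(String host, Integer numBlocks) { }
  @Override public void recordPartitionBlockMap(String partitionID, Integer numBlocks) { }
  @Override public void recordTotalRecords(long totalRecords) { }
  @Override public void printStatisticsInfo(String partitionID) { }
}
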
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/util/ValueCompressionUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/ValueCompressionUtil.java b/core/src/main/java/org/apache/carbondata/core/util/ValueCompressionUtil.java
new file mode 100644
index 0000000..4a229d6
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/util/ValueCompressionUtil.java
@@ -0,0 +1,1027 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.util;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastorage.store.compression.MeasureMetaDataModel;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressByteArray;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressDefaultLong;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressMaxMinByte;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressMaxMinDefault;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressMaxMinFloat;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressMaxMinInt;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressMaxMinLong;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressMaxMinShort;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalByte;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalDefault;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalFloat;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalInt;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalLong;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalMaxMinByte;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalMaxMinDefault;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalMaxMinFloat;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalMaxMinInt;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalMaxMinLong;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalMaxMinShort;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNonDecimalShort;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNoneByte;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNoneDefault;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNoneFloat;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNoneInt;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNoneLong;
+import org.apache.carbondata.core.datastorage.store.compression.type.UnCompressNoneShort;
+
+public final class ValueCompressionUtil {
+
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(ValueCompressionUtil.class.getName());
+
+  private ValueCompressionUtil() {
+
+  }
+
+  /**
+   * decide the actual type of a value
+   *
+   * @param value            the measure value
+   * @param decimal          decimal count of the value
+   * @param dataTypeSelected when 1, float is considered for decimal values
+   * @return actual type of the value
+   */
+  private static DataType getDataType(double value, int decimal, byte dataTypeSelected) {
+    DataType dataType = DataType.DATA_DOUBLE;
+    if (decimal == 0) {
+      if (value < Byte.MAX_VALUE) {
+        dataType = DataType.DATA_BYTE;
+      } else if (value < Short.MAX_VALUE) {
+        dataType = DataType.DATA_SHORT;
+      } else if (value < Integer.MAX_VALUE) {
+        dataType = DataType.DATA_INT;
+      } else if (value < Long.MAX_VALUE) {
+        dataType = DataType.DATA_LONG;
+      }
+    } else {
+      if (dataTypeSelected == 1) {
+        if (value < Float.MAX_VALUE) {
+          float floatValue = (float) value;
+          if (floatValue - value != 0) {
+            dataType = DataType.DATA_DOUBLE;
+
+          } else {
+            dataType = DataType.DATA_FLOAT;
+          }
+        } else if (value < Double.MAX_VALUE) {
+          dataType = DataType.DATA_DOUBLE;
+        }
+      }
+    }
+    return dataType;
+  }
+
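// Editor's note, not part of the commit: getDataType picks the narrowest type
// that can hold the value. With decimal == 0, getDataType(100.0, 0, b) yields
// DATA_BYTE (100 < Byte.MAX_VALUE) while getDataType(40000.0, 0, b) yields
// DATA_INT (40000 exceeds Short.MAX_VALUE). With decimal > 0 and
// dataTypeSelected == 1, a value that survives the float round trip, such as
// 1.5, yields DATA_FLOAT; anything that loses precision stays DATA_DOUBLE.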
+  /**
+   * Gives the size in bytes of a datatype
+   *
+   * @param dataType measure value type
+   * @return the size of the DataType in bytes
+   */
+  public static int getSize(DataType dataType) {
+
+    switch (dataType) {
+      case DATA_BYTE:
+        return 1;
+      case DATA_SHORT:
+        return 2;
+      case DATA_INT:
+      case DATA_FLOAT:
+        return 4;
+      default:
+        return 8;
+    }
+  }
+
+  /**
+   * get the best compression type. priority list, from high to low:
+   * COMPRESSION_TYPE.NONE, COMPRESSION_TYPE.MAX_MIN,
+   * COMPRESSION_TYPE.NON_DECIMAL_CONVERT, COMPRESSION_TYPE.MAX_MIN_NDC
+   *
+   * @param maxValue max value of one measure
+   * @param minValue min value of one measure
+   * @param decimal  decimal count of one measure
+   * @return the best compression type
+   */
+  private static CompressionFinder getCompressionType(Object maxValue, Object minValue, int decimal,
+      char aggregatorType, byte dataTypeSelected) {
+    // 'c' for aggregate table, 'b' for BigDecimal, 'l' for long, 'n' for double
+    switch (aggregatorType) {
+      case 'c':
+        return new CompressionFinder(COMPRESSION_TYPE.CUSTOM, DataType.DATA_BYTE,
+            DataType.DATA_BYTE);
+      case 'b':
+        return new CompressionFinder(COMPRESSION_TYPE.CUSTOM_BIGDECIMAL, DataType.DATA_BYTE,
+            DataType.DATA_BYTE);
+      case 'l':
+        return new CompressionFinder(COMPRESSION_TYPE.NONE,
+                DataType.DATA_BIGINT, DataType.DATA_BIGINT);
+      default:
+        break;
+    }
+    // None Decimal
+    if (decimal == 0) {
+      if (getSize(getDataType((double) maxValue, decimal, dataTypeSelected)) > getSize(
+          getDataType((double) maxValue - (double) minValue, decimal, dataTypeSelected))) {
+        return new CompressionFinder(COMPRESSION_TYPE.MAX_MIN, DataType.DATA_DOUBLE,
+            getDataType((double) maxValue - (double) minValue, decimal, dataTypeSelected));
+      } else if (getSize(getDataType((double) maxValue, decimal, dataTypeSelected)) < getSize(
+              getDataType((double) maxValue - (double) minValue, decimal, dataTypeSelected))) {
+        return new CompressionFinder(COMPRESSION_TYPE.NONE, DataType.DATA_DOUBLE,
+                getDataType((double) maxValue - (double) minValue, decimal, dataTypeSelected));
+      } else {
+        return new CompressionFinder(COMPRESSION_TYPE.NONE, DataType.DATA_DOUBLE,
+            getDataType((double) maxValue, decimal, dataTypeSelected));
+      }
+    }
+    // decimal
+    else {
+      DataType actualDataType = getDataType((double) maxValue, decimal, dataTypeSelected);
+      DataType diffDataType =
+          getDataType((double) maxValue - (double) minValue, decimal, dataTypeSelected);
+      DataType maxNonDecDataType =
+          getDataType(Math.pow(10, decimal) * (double) maxValue, 0, dataTypeSelected);
+      DataType diffNonDecDataType =
+          getDataType(Math.pow(10, decimal) * ((double) maxValue - (double) minValue), 0,
+              dataTypeSelected);
+
+      CompressionFinder[] finders = new CompressionFinder[] {
+          new CompressionFinder(actualDataType, actualDataType, CompressionFinder.PRIORITY.ACTUAL,
+              COMPRESSION_TYPE.NONE),
+          new CompressionFinder(actualDataType, diffDataType, CompressionFinder.PRIORITY.DIFFSIZE,
+              COMPRESSION_TYPE.MAX_MIN), new CompressionFinder(actualDataType, maxNonDecDataType,
+          CompressionFinder.PRIORITY.MAXNONDECIMAL, COMPRESSION_TYPE.NON_DECIMAL_CONVERT),
+          new CompressionFinder(actualDataType, diffNonDecDataType,
+              CompressionFinder.PRIORITY.DIFFNONDECIMAL, COMPRESSION_TYPE.MAX_MIN_NDC) };
+      // sort the compression finders; the first entry has the highest priority
+      Arrays.sort(finders);
+      CompressionFinder compression = finders[0];
+      return compression;
+    }
+  }
+
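// Editor's worked example, not part of the commit: for a non-decimal measure
// with maxValue = 1000000 and minValue = 999000, the raw values need DATA_INT
// (4 bytes), but maxValue - minValue = 1000 fits in DATA_SHORT (2 bytes), so
// the size comparison above picks COMPRESSION_TYPE.MAX_MIN with a SHORT
// changed type, and each value is stored as (maxValue - value).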
+  /**
+   * @param compType        compression type
+   * @param values          the data of one measure
+   * @param changedDataType changed data type
+   * @param maxValue        the max value of one measure
+   * @param decimal         the decimal length of one measure
+   * @return the compressed data array
+   */
+  public static Object getCompressedValues(COMPRESSION_TYPE compType, double[] values,
+      DataType changedDataType, double maxValue, int decimal) {
+    Object o;
+    switch (compType) {
+      case NONE:
+
+        o = compressNone(changedDataType, values);
+        return o;
+
+      case MAX_MIN:
+
+        o = compressMaxMin(changedDataType, values, maxValue);
+        return o;
+
+      case NON_DECIMAL_CONVERT:
+
+        o = compressNonDecimal(changedDataType, values, decimal);
+        return o;
+
+      default:
+        o = compressNonDecimalMaxMin(changedDataType, values, decimal, maxValue);
+        return o;
+    }
+  }
+
+  public static Object getCompressedValues(COMPRESSION_TYPE compType, long[] values,
+      DataType changedDataType, long maxValue, int decimal) {
+    // big int (long) measures are currently stored as-is for every compression type
+    return values;
+  }
+
+  private static ValueCompressonHolder.UnCompressValue[] getUncompressedValues(
+      COMPRESSION_TYPE[] compType, DataType[] actualDataType, DataType[] changedDataType) {
+
+    ValueCompressonHolder.UnCompressValue[] compressValue =
+        new ValueCompressonHolder.UnCompressValue[changedDataType.length];
+    for (int i = 0; i < changedDataType.length; i++) {
+      switch (compType[i]) {
+        case NONE:
+
+          compressValue[i] = unCompressNone(changedDataType[i], actualDataType[i]);
+          break;
+
+        case MAX_MIN:
+
+          compressValue[i] = unCompressMaxMin(changedDataType[i], actualDataType[i]);
+          break;
+
+        case NON_DECIMAL_CONVERT:
+
+          compressValue[i] = unCompressNonDecimal(changedDataType[i], DataType.DATA_DOUBLE);
+          break;
+
+        case CUSTOM:
+          compressValue[i] = new UnCompressByteArray(UnCompressByteArray.ByteArrayType.BYTE_ARRAY);
+          break;
+
+        case CUSTOM_BIGDECIMAL:
+          compressValue[i] = new UnCompressByteArray(UnCompressByteArray.ByteArrayType.BIG_DECIMAL);
+          break;
+
+        default:
+          compressValue[i] = unCompressNonDecimalMaxMin(changedDataType[i], null);
+      }
+    }
+    return compressValue;
+
+  }
+
+  /**
+   * compress data to other type for example: double -> int
+   */
+  private static Object compressNone(DataType changedDataType, double[] value) {
+    int i = 0;
+    switch (changedDataType) {
+
+      case DATA_BYTE:
+
+        byte[] result = new byte[value.length];
+
+        for (double a : value) {
+          result[i] = (byte) a;
+          i++;
+        }
+        return result;
+
+      case DATA_SHORT:
+
+        short[] shortResult = new short[value.length];
+
+        for (double a : value) {
+          shortResult[i] = (short) a;
+          i++;
+        }
+        return shortResult;
+
+      case DATA_INT:
+
+        int[] intResult = new int[value.length];
+
+        for (double a : value) {
+          intResult[i] = (int) a;
+          i++;
+        }
+        return intResult;
+
+      case DATA_LONG:
+      case DATA_BIGINT:
+
+        long[] longResult = new long[value.length];
+
+        for (double a : value) {
+          longResult[i] = (long) a;
+          i++;
+        }
+        return longResult;
+
+      case DATA_FLOAT:
+
+        float[] floatResult = new float[value.length];
+
+        for (double a : value) {
+          floatResult[i] = (float) a;
+          i++;
+        }
+        return floatResult;
+
+      default:
+
+        return value;
+
+    }
+  }
+
+  /**
+   * compress data to another type via a subtracted value, for example:
+   * 1. subValue = maxValue - value; 2. subValue: double -> int
+   */
+  private static Object compressMaxMin(DataType changedDataType, double[] value, double maxValue) {
+    int i = 0;
+    switch (changedDataType) {
+      case DATA_BYTE:
+
+        byte[] result = new byte[value.length];
+        for (double a : value) {
+          result[i] = (byte) (maxValue - a);
+          i++;
+        }
+        return result;
+
+      case DATA_SHORT:
+
+        short[] shortResult = new short[value.length];
+
+        for (double a : value) {
+          shortResult[i] = (short) (maxValue - a);
+          i++;
+        }
+        return shortResult;
+
+      case DATA_INT:
+
+        int[] intResult = new int[value.length];
+
+        for (double a : value) {
+          intResult[i] = (int) (maxValue - a);
+          i++;
+        }
+        return intResult;
+
+      case DATA_LONG:
+
+        long[] longResult = new long[value.length];
+
+        for (double a : value) {
+          longResult[i] = (long) (maxValue - a);
+          i++;
+        }
+        return longResult;
+
+      case DATA_FLOAT:
+
+        float[] floatResult = new float[value.length];
+
+        for (double a : value) {
+          floatResult[i] = (float) (maxValue - a);
+          i++;
+        }
+        return floatResult;
+
+      default:
+
+        double[] defaultResult = new double[value.length];
+
+        for (double a : value) {
+          defaultResult[i] = (double) (maxValue - a);
+          i++;
+        }
+        return defaultResult;
+
+    }
+  }
+
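// Editor's worked example, not part of the commit: with maxValue = 1000.0 and
// values {998.0, 999.0, 1000.0}, compressMaxMin(DATA_BYTE, values, 1000.0)
// stores the bytes {2, 1, 0}; the reader later recovers each original value
// as maxValue - storedValue.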
+  /**
+   * compress data to another type via a scaled value, for example:
+   * 1. subValue = value * Math.pow(10, decimal); 2. subValue: double -> int
+   */
+  private static Object compressNonDecimal(DataType changedDataType, double[] value, int decimal) {
+    int i = 0;
+    switch (changedDataType) {
+      case DATA_BYTE:
+        byte[] result = new byte[value.length];
+
+        for (double a : value) {
+          result[i] = (byte) (Math.round(Math.pow(10, decimal) * a));
+          i++;
+        }
+        return result;
+      case DATA_SHORT:
+        short[] shortResult = new short[value.length];
+
+        for (double a : value) {
+          shortResult[i] = (short) (Math.round(Math.pow(10, decimal) * a));
+          i++;
+        }
+        return shortResult;
+      case DATA_INT:
+
+        int[] intResult = new int[value.length];
+
+        for (double a : value) {
+          intResult[i] = (int) (Math.round(Math.pow(10, decimal) * a));
+          i++;
+        }
+        return intResult;
+
+      case DATA_LONG:
+
+        long[] longResult = new long[value.length];
+
+        for (double a : value) {
+          longResult[i] = (long) (Math.round(Math.pow(10, decimal) * a));
+          i++;
+        }
+        return longResult;
+
+      case DATA_FLOAT:
+
+        float[] floatResult = new float[value.length];
+
+        for (double a : value) {
+          floatResult[i] = (float) (Math.round(Math.pow(10, decimal) * a));
+          i++;
+        }
+        return floatResult;
+
+      default:
+        double[] defaultResult = new double[value.length];
+
+        for (double a : value) {
+          defaultResult[i] = (double) (Math.round(Math.pow(10, decimal) * a));
+          i++;
+        }
+        return defaultResult;
+    }
+  }
+
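// Editor's worked example, not part of the commit: with decimal = 2,
// compressNonDecimal scales each value by 10^2 and rounds, so {1.23, 4.56}
// is stored as {123, 456}; decompression divides by Math.pow(10, decimal)
// to recover the doubles.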
+  /**
+   * compress data to another type via a subtracted and scaled value, for example:
+   * 1. subValue = maxValue - value; 2. subValue = subValue * Math.pow(10, decimal);
+   * 3. subValue: double -> int
+   */
+  private static Object compressNonDecimalMaxMin(DataType changedDataType, double[] value,
+      int decimal, double maxValue) {
+    int i = 0;
+    switch (changedDataType) {
+      case DATA_BYTE:
+
+        byte[] result = new byte[value.length];
+
+        for (double a : value) {
+          result[i] = (byte) (Math.round((maxValue - a) * Math.pow(10, decimal)));
+          i++;
+        }
+        return result;
+
+      case DATA_SHORT:
+
+        short[] shortResult = new short[value.length];
+
+        for (double a : value) {
+          shortResult[i] = (short) (Math.round((maxValue - a) * Math.pow(10, decimal)));
+          i++;
+        }
+        return shortResult;
+
+      case DATA_INT:
+
+        int[] intResult = new int[value.length];
+
+        for (double a : value) {
+          intResult[i] = (int) (Math.round((maxValue - a) * Math.pow(10, decimal)));
+          i++;
+        }
+        return intResult;
+
+      case DATA_LONG:
+
+        long[] longResult = new long[value.length];
+
+        for (double a : value) {
+          longResult[i] = Math.round((maxValue - a) * Math.pow(10, decimal));
+          i++;
+        }
+        return longResult;
+
+      case DATA_FLOAT:
+
+        float[] floatResult = new float[value.length];
+
+        for (double a : value) {
+          floatResult[i] = (float) (Math.round((maxValue - a) * Math.pow(10, decimal)));
+          i++;
+        }
+        return floatResult;
+
+      default:
+
+        double[] defaultResult = new double[value.length];
+
+        for (double a : value) {
+          defaultResult[i] = (double) (Math.round((maxValue - a) * Math.pow(10, decimal)));
+          i++;
+        }
+        return defaultResult;
+
+    }
+  }
+
+  /**
+   * uncompress data, for example: int -> double
+   */
+  public static ValueCompressonHolder.UnCompressValue unCompressNone(DataType compDataType,
+      DataType actualDataType) {
+    if (actualDataType == DataType.DATA_BIGINT) {
+      return new UnCompressDefaultLong();
+    } else {
+      switch (compDataType) {
+        case DATA_BYTE:
+
+          return new UnCompressNoneByte();
+
+        case DATA_SHORT:
+
+          return new UnCompressNoneShort();
+
+        case DATA_INT:
+
+          return new UnCompressNoneInt();
+
+        case DATA_LONG:
+
+          return new UnCompressNoneLong();
+
+        case DATA_FLOAT:
+
+          return new UnCompressNoneFloat();
+
+        default:
+
+          return new UnCompressNoneDefault();
+      }
+    }
+  }
+
+  /**
+   * uncompress data: 1. value = maxValue - subValue; 2. value: int -> double
+   */
+  public static ValueCompressonHolder.UnCompressValue unCompressMaxMin(DataType compDataType,
+      DataType actualDataType) {
+    switch (compDataType) {
+      case DATA_BYTE:
+
+        return new UnCompressMaxMinByte();
+
+      case DATA_SHORT:
+
+        return new UnCompressMaxMinShort();
+
+      case DATA_INT:
+
+        return new UnCompressMaxMinInt();
+
+      case DATA_LONG:
+
+        return new UnCompressMaxMinLong();
+
+      case DATA_FLOAT:
+
+        return new UnCompressMaxMinFloat();
+
+      default:
+
+        return new UnCompressMaxMinDefault();
+
+    }
+  }
+
+  /**
+   * uncompress data: value = value / Math.pow(10, decimal)
+   */
+  public static ValueCompressonHolder.UnCompressValue unCompressNonDecimal(DataType compDataType,
+      DataType actualDataType) {
+    switch (compDataType) {
+      case DATA_BYTE:
+
+        return new UnCompressNonDecimalByte();
+
+      case DATA_SHORT:
+
+        return new UnCompressNonDecimalShort();
+
+      case DATA_INT:
+
+        return new UnCompressNonDecimalInt();
+
+      case DATA_LONG:
+
+        return new UnCompressNonDecimalLong();
+
+      case DATA_FLOAT:
+
+        return new UnCompressNonDecimalFloat();
+
+      default:
+
+        return new UnCompressNonDecimalDefault();
+
+    }
+  }
+
+  /**
+   * uncompress data: value = (maxValue - subValue) / Math.pow(10, decimal)
+   */
+  public static ValueCompressonHolder.UnCompressValue unCompressNonDecimalMaxMin(
+      DataType compDataType, DataType actualDataType) {
+    switch (compDataType) {
+      case DATA_BYTE:
+
+        return new UnCompressNonDecimalMaxMinByte();
+
+      case DATA_SHORT:
+
+        return new UnCompressNonDecimalMaxMinShort();
+
+      case DATA_INT:
+
+        return new UnCompressNonDecimalMaxMinInt();
+
+      case DATA_LONG:
+
+        return new UnCompressNonDecimalMaxMinLong();
+
+      case DATA_FLOAT:
+
+        return new UnCompressNonDecimalMaxMinFloat();
+
+      default:
+
+        return new UnCompressNonDecimalMaxMinDefault();
+
+    }
+  }
+
+  /**
+   * Create Value compression model
+   *
+   * @param maxValue
+   * @param minValue
+   * @param decimalLength
+   * @param uniqueValue
+   * @param aggType
+   * @param dataTypeSelected
+   * @return the value compression model
+   */
+  public static ValueCompressionModel getValueCompressionModel(Object[] maxValue, Object[] minValue,
+      int[] decimalLength, Object[] uniqueValue, char[] aggType, byte[] dataTypeSelected) {
+
+    MeasureMetaDataModel metaDataModel =
+        new MeasureMetaDataModel(minValue, maxValue, decimalLength, maxValue.length, uniqueValue,
+            aggType, dataTypeSelected);
+    return getValueCompressionModel(metaDataModel);
+  }
+
+  public static ValueCompressionModel getValueCompressionModel(MeasureMetaDataModel measureMDMdl) {
+    int measureCount = measureMDMdl.getMeasureCount();
+    Object[] minValue = measureMDMdl.getMinValue();
+    Object[] maxValue = measureMDMdl.getMaxValue();
+    Object[] uniqueValue = measureMDMdl.getUniqueValue();
+    int[] decimal = measureMDMdl.getDecimal();
+    char[] type = measureMDMdl.getType();
+    byte[] dataTypeSelected = measureMDMdl.getDataTypeSelected();
+    ValueCompressionModel compressionModel = new ValueCompressionModel();
+    DataType[] actualType = new DataType[measureCount];
+    DataType[] changedType = new DataType[measureCount];
+    COMPRESSION_TYPE[] compType = new COMPRESSION_TYPE[measureCount];
+    for (int i = 0; i < measureCount; i++) {
+      CompressionFinder compressionFinder = ValueCompressionUtil
+          .getCompressionType(maxValue[i], minValue[i], decimal[i], type[i], dataTypeSelected[i]);
+      actualType[i] = compressionFinder.actualDataType;
+      changedType[i] = compressionFinder.changedDataType;
+      compType[i] = compressionFinder.compType;
+    }
+    compressionModel.setMaxValue(maxValue);
+    compressionModel.setDecimal(decimal);
+    compressionModel.setChangedDataType(changedType);
+    compressionModel.setCompType(compType);
+    compressionModel.setActualDataType(actualType);
+    compressionModel.setMinValue(minValue);
+    compressionModel.setUniqueValue(uniqueValue);
+    compressionModel.setType(type);
+    compressionModel.setMinValueFactForAgg(measureMDMdl.getMinValueFactForAgg());
+    compressionModel.setDataTypeSelected(dataTypeSelected);
+    ValueCompressonHolder.UnCompressValue[] values = ValueCompressionUtil
+        .getUncompressedValues(compressionModel.getCompType(), compressionModel.getActualDataType(),
+            compressionModel.getChangedDataType());
+    compressionModel.setUnCompressValues(values);
+    return compressionModel;
+  }
+
+  public static byte[] convertToBytes(short[] values) {
+    ByteBuffer buffer = ByteBuffer.allocate(values.length * 2);
+    for (short val : values) {
+      buffer.putShort(val);
+    }
+    return buffer.array();
+  }
+
+  public static byte[] convertToBytes(int[] values) {
+    ByteBuffer buffer = ByteBuffer.allocate(values.length * 4);
+    for (int val : values) {
+      buffer.putInt(val);
+    }
+    return buffer.array();
+  }
+
+  public static byte[] convertToBytes(float[] values) {
+    ByteBuffer buffer = ByteBuffer.allocate(values.length * 4);
+    for (float val : values) {
+      buffer.putFloat(val);
+    }
+    return buffer.array();
+  }
+
+  public static byte[] convertToBytes(long[] values) {
+    ByteBuffer buffer = ByteBuffer.allocate(values.length * 8);
+    for (long val : values) {
+      buffer.putLong(val);
+    }
+    return buffer.array();
+  }
+
+  public static byte[] convertToBytes(double[] values) {
+    ByteBuffer buffer = ByteBuffer.allocate(values.length * 8);
+    for (double val : values) {
+      buffer.putDouble(val);
+    }
+    return buffer.array();
+  }
+
+  public static short[] convertToShortArray(ByteBuffer buffer, int length) {
+    buffer.rewind();
+    short[] values = new short[length / 2];
+
+    for (int i = 0; i < values.length; i++) {
+      values[i] = buffer.getShort();
+    }
+    return values;
+  }
+
+  public static int[] convertToIntArray(ByteBuffer buffer, int length) {
+    buffer.rewind();
+    int[] values = new int[length / 4];
+
+    for (int i = 0; i < values.length; i++) {
+      values[i] = buffer.getInt();
+    }
+    return values;
+  }
+
+  public static float[] convertToFloatArray(ByteBuffer buffer, int length) {
+    buffer.rewind();
+    float[] values = new float[length / 4];
+
+    for (int i = 0; i < values.length; i++) {
+      values[i] = buffer.getFloat();
+    }
+    return values;
+  }
+
+  public static long[] convertToLongArray(ByteBuffer buffer, int length) {
+    buffer.rewind();
+    long[] values = new long[length / 8];
+    for (int i = 0; i < values.length; i++) {
+      values[i] = buffer.getLong();
+    }
+    return values;
+  }
+
+  public static double[] convertToDoubleArray(ByteBuffer buffer, int length) {
+    buffer.rewind();
+    double[] values = new double[length / 8];
+    for (int i = 0; i < values.length; i++) {
+      values[i] = buffer.getDouble();
+    }
+    return values;
+  }
+
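// Editor's sketch, not part of the commit: the convertToBytes/convertTo*Array
// pairs above are a plain ByteBuffer round trip, e.g.:
//   int[] original = { 7, 42, -1 };
//   byte[] packed = ValueCompressionUtil.convertToBytes(original);      // 12 bytes
//   int[] restored = ValueCompressionUtil.convertToIntArray(
//       ByteBuffer.wrap(packed), packed.length);                        // {7, 42, -1}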
+  /**
+   * used to identify the compression type.
+   */
+  public enum COMPRESSION_TYPE {
+    NONE,
+    MAX_MIN,
+    NON_DECIMAL_CONVERT,
+    MAX_MIN_NDC,
+    CUSTOM,
+    CUSTOM_BIGDECIMAL
+  }
+
+  /**
+   * used to identify the type of the data.
+   */
+  public enum DataType {
+    DATA_BYTE,
+    DATA_SHORT,
+    DATA_INT,
+    DATA_FLOAT,
+    DATA_LONG,
+    DATA_BIGINT,
+    DATA_DOUBLE
+  }
+
+  /**
+   * Selects the best compression type based on the size of the changed data
+   * type, the priority and the compression type.
+   */
+  private static class CompressionFinder implements Comparable<CompressionFinder> {
+    /**
+     * compType.
+     */
+    private COMPRESSION_TYPE compType;
+    /**
+     * actualDataType.
+     */
+    private DataType actualDataType;
+    /**
+     * changedDataType.
+     */
+    private DataType changedDataType;
+    /**
+     * the size of changed data
+     */
+    private int size;
+    /**
+     * priority.
+     */
+    private PRIORITY priority;
+
+    /**
+     * CompressionFinder constructor.
+     *
+     * @param compType
+     * @param actualDataType
+     * @param changedDataType
+     */
+    CompressionFinder(COMPRESSION_TYPE compType, DataType actualDataType,
+        DataType changedDataType) {
+      super();
+      this.compType = compType;
+      this.actualDataType = actualDataType;
+      this.changedDataType = changedDataType;
+    }
+
+    /**
+     * CompressionFinder overloaded constructor.
+     *
+     * @param actualDataType
+     * @param changedDataType
+     * @param priority
+     * @param compType
+     */
+
+    CompressionFinder(DataType actualDataType, DataType changedDataType, PRIORITY priority,
+        COMPRESSION_TYPE compType) {
+      super();
+      this.actualDataType = actualDataType;
+      this.changedDataType = changedDataType;
+      this.size = getSize(changedDataType);
+      this.priority = priority;
+      this.compType = compType;
+    }
+
+    @Override public boolean equals(Object obj) {
+      boolean equals = false;
+      if (obj instanceof CompressionFinder) {
+        CompressionFinder cf = (CompressionFinder) obj;
+
+        if (this.size == cf.size && this.priority == cf.priority) {
+          equals = true;
+        }
+
+      }
+      return equals;
+    }
+
+    @Override public int hashCode() {
+      final int code = 31;
+      int result = 1;
+
+      result = code * result + this.size;
+      result = code * result + ((priority == null) ? 0 : priority.hashCode());
+      return result;
+    }
+
+    @Override public int compareTo(CompressionFinder o) {
+      int returnVal = 0;
+      // a bigger size has a higher priority
+      if (this.equals(o)) {
+        returnVal = 0;
+      } else if (this.size == o.size) {
+        // the compression type priority
+        if (priority.priority > o.priority.priority) {
+          returnVal = 1;
+        } else if (priority.priority < o.priority.priority) {
+          returnVal = -1;
+        }
+
+      } else if (this.size > o.size) {
+        returnVal = 1;
+      } else {
+        returnVal = -1;
+      }
+      return returnVal;
+    }
+
+    /**
+     * Compression type priority.
+     * ACTUAL is the highest priority and DIFFNONDECIMAL is the lowest
+     * priority
+     */
+    enum PRIORITY {
+      ACTUAL(0),
+      DIFFSIZE(1),
+      MAXNONDECIMAL(2),
+      DIFFNONDECIMAL(3);
+
+      /**
+       * numeric priority; a lower value means a higher priority
+       */
+      private int priority;
+
+      PRIORITY(int priority) {
+        this.priority = priority;
+      }
+    }
+  }
+
+}
+
+
+

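For reference, a minimal round-trip sketch of the byte-conversion helpers above. It assumes the helpers live in ValueCompressionUtil, as the getUncompressedValues call earlier in this hunk suggests; the class name ConvertRoundTrip and the sample values are illustrative only:

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    public class ConvertRoundTrip {
      public static void main(String[] args) {
        int[] original = {1, 2, 3, 42};
        // convertToBytes packs 4 big-endian bytes per int, in order
        byte[] encoded = ValueCompressionUtil.convertToBytes(original);
        // the second argument is the byte length, so the decoded array
        // comes out as length / 4 ints
        int[] decoded =
            ValueCompressionUtil.convertToIntArray(ByteBuffer.wrap(encoded), encoded.length);
        System.out.println(Arrays.equals(original, decoded)); // prints true
      }
    }
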
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/writer/ByteArrayHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/ByteArrayHolder.java b/core/src/main/java/org/apache/carbondata/core/writer/ByteArrayHolder.java
new file mode 100644
index 0000000..73bbf11
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/writer/ByteArrayHolder.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.writer;
+
+import java.util.Arrays;
+
+import org.apache.carbondata.core.util.ByteUtil;
+
+public class ByteArrayHolder implements Comparable<ByteArrayHolder> {
+
+  /**
+   * mdkey
+   */
+  private byte[] mdKey;
+
+  /**
+   * primary key
+   */
+  private int primaryKey;
+
+  /**
+   * @param mdKey
+   * @param primaryKey
+   */
+  public ByteArrayHolder(byte[] mdKey, int primaryKey) {
+    this.mdKey = mdKey;
+    this.primaryKey = primaryKey;
+  }
+
+  @Override public int compareTo(ByteArrayHolder o) {
+    return ByteUtil.compare(mdKey, o.mdKey);
+  }
+
+  @Override public boolean equals(Object obj) {
+    if (obj instanceof ByteArrayHolder) {
+      return 0 == ByteUtil.compare(mdKey, ((ByteArrayHolder) obj).mdKey);
+    }
+    return false;
+  }
+
+  @Override public int hashCode() {
+    // equals() compares only mdKey, so primaryKey must not contribute here,
+    // otherwise equal holders could produce different hash codes
+    return 31 * Arrays.hashCode(mdKey);
+  }
+
+  public byte[] getMdKey() {
+    return mdKey;
+  }
+
+  public int getPrimaryKey() {
+    return primaryKey;
+  }
+
+}
+

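A short sketch of the ordering contract above: holders sort by the mdKey bytes alone, and the primary key rides along as payload. The values are illustrative:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    import org.apache.carbondata.core.writer.ByteArrayHolder;

    public class ByteArrayHolderSortExample {
      public static void main(String[] args) {
        List<ByteArrayHolder> holders = new ArrayList<ByteArrayHolder>();
        holders.add(new ByteArrayHolder(new byte[] {2, 0}, 100));
        holders.add(new ByteArrayHolder(new byte[] {1, 5}, 200));
        // compareTo delegates to ByteUtil.compare on mdKey, so {1, 5} sorts first
        Collections.sort(holders);
        System.out.println(holders.get(0).getPrimaryKey()); // prints 200
      }
    }
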
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/writer/CarbonDictionaryWriter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/CarbonDictionaryWriter.java b/core/src/main/java/org/apache/carbondata/core/writer/CarbonDictionaryWriter.java
new file mode 100644
index 0000000..24ea06e
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/writer/CarbonDictionaryWriter.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.writer;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * dictionary writer interface
+ */
+public interface CarbonDictionaryWriter extends Closeable {
+  /**
+   * write method that accepts one value at a time.
+   * This method can be used when the data is huge and memory is limited. In
+   * that case the data can be stored in a file and an iterator can iterate
+   * over it and pass one value at a time
+   *
+   * @param value unique dictionary value
+   * @throws IOException if an I/O error occurs
+   */
+  void write(String value) throws IOException;
+
+  /**
+   * write method that accepts one value at a time.
+   * This method can be used when the data is huge and memory is limited. In
+   * that case the data can be stored in a file and an iterator can iterate
+   * over it and pass one value at a time
+   *
+   * @param value unique dictionary value
+   * @throws IOException if an I/O error occurs
+   */
+  void write(byte[] value) throws IOException;
+
+  /**
+   * write method that accepts a list of byte arrays as value.
+   * This can be used when the data volume is small; each string value can be
+   * converted to a byte array and added to the list
+   *
+   * @param valueList list of byte array. Each byte array is unique dictionary value
+   * @throws IOException if an I/O error occurs
+   */
+  void write(List<byte[]> valueList) throws IOException;
+
+
+  void commit() throws IOException;
+}

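A hedged sketch of how this contract is driven, based on the implementation in the next hunk: write values, close to flush the last chunk, then commit to record the metadata. The class is assumed to sit in org.apache.carbondata.core.writer next to the writer; the store path and identifiers are placeholders:

    import java.io.IOException;
    import java.util.List;

    import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
    import org.apache.carbondata.core.carbon.ColumnIdentifier;

    public class DictionaryWriterUsage {
      public static void writeAll(String storePath, CarbonTableIdentifier table,
          ColumnIdentifier column, List<String> values) throws IOException {
        CarbonDictionaryWriter writer = new CarbonDictionaryWriterImpl(storePath, table, column);
        try {
          for (String value : values) {
            writer.write(value); // flushed to disk one chunk at a time
          }
        } finally {
          writer.close();        // writes the pending chunk and closes the thrift stream
        }
        writer.commit();         // records offsets and the surrogate key range
      }
    }
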
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/writer/CarbonDictionaryWriterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/CarbonDictionaryWriterImpl.java b/core/src/main/java/org/apache/carbondata/core/writer/CarbonDictionaryWriterImpl.java
new file mode 100644
index 0000000..2e08610
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/writer/CarbonDictionaryWriterImpl.java
@@ -0,0 +1,422 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.writer;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.common.factory.CarbonCommonFactory;
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.carbon.ColumnIdentifier;
+import org.apache.carbondata.core.carbon.path.CarbonTablePath;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFile;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+import org.apache.carbondata.core.reader.CarbonDictionaryColumnMetaChunk;
+import org.apache.carbondata.core.reader.CarbonDictionaryMetadataReader;
+import org.apache.carbondata.core.reader.CarbonDictionaryMetadataReaderImpl;
+import org.apache.carbondata.core.service.PathService;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.format.ColumnDictionaryChunk;
+import org.apache.carbondata.format.ColumnDictionaryChunkMeta;
+
+import org.apache.thrift.TBase;
+
+/**
+ * This class is responsible for writing the dictionary file and its metadata
+ */
+public class CarbonDictionaryWriterImpl implements CarbonDictionaryWriter {
+
+  /**
+   * LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(CarbonDictionaryWriterImpl.class.getName());
+
+  /**
+   * carbon type identifier
+   */
+  protected CarbonTableIdentifier carbonTableIdentifier;
+
+  /**
+   * list which will hold values up to a maximum of one dictionary chunk size
+   */
+  private List<ByteBuffer> oneDictionaryChunkList;
+
+  /**
+   * Meta object which will hold last segment entry details
+   */
+  private CarbonDictionaryColumnMetaChunk chunkMetaObjectForLastSegmentEntry;
+
+  /**
+   * dictionary file and meta thrift writer
+   */
+  private ThriftWriter dictionaryThriftWriter;
+
+  /**
+   * column identifier
+   */
+  protected ColumnIdentifier columnIdentifier;
+
+  /**
+   * HDFS store path
+   */
+  protected String hdfsStorePath;
+
+  /**
+   * dictionary file path
+   */
+  protected String dictionaryFilePath;
+
+  /**
+   * dictionary metadata file path
+   */
+  protected String dictionaryMetaFilePath;
+
+  /**
+   * start offset of dictionary chunk  for a segment
+   */
+  private long chunk_start_offset;
+
+  /**
+   * end offset of a dictionary chunk for a segment
+   */
+  private long chunk_end_offset;
+
+  /**
+   * total dictionary value record count for one segment
+   */
+  private int totalRecordCount;
+
+  /**
+   * total thrift object chunk count written for one segment
+   */
+  private int chunk_count;
+
+  /**
+   * chunk size for a dictionary file after which data will be written to disk
+   */
+  private int dictionary_one_chunk_size;
+
+  /**
+   * flag to check whether write method is called for first time
+   */
+  private boolean isFirstTime;
+
+  private static final Charset defaultCharset = Charset.forName(
+      CarbonCommonConstants.DEFAULT_CHARSET);
+
+  /**
+   * Constructor
+   *
+   * @param hdfsStorePath         HDFS store path
+   * @param carbonTableIdentifier table identifier which will give table name and database name
+   * @param columnIdentifier      column unique identifier
+   */
+  public CarbonDictionaryWriterImpl(String hdfsStorePath,
+      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier) {
+    this.carbonTableIdentifier = carbonTableIdentifier;
+    this.columnIdentifier = columnIdentifier;
+    this.hdfsStorePath = hdfsStorePath;
+    this.isFirstTime = true;
+  }
+
+  /**
+   * This method will write the data in thrift format to disk. This method will be guided by
+   * parameter dictionary_one_chunk_size and data will be divided into chunks
+   * based on this parameter
+   *
+   * @param value unique dictionary value
+   * @throws IOException if an I/O error occurs
+   */
+  @Override public void write(String value) throws IOException {
+    write(value.getBytes(defaultCharset));
+  }
+
+  /**
+   * This method will write the data in thrift format to disk. This method will be guided by
+   * parameter dictionary_one_chunk_size and data will be divided into chunks
+   * based on this parameter
+   *
+   * @param value unique dictionary value
+   * @throws IOException if an I/O error occurs
+   */
+  @Override public void write(byte[] value) throws IOException {
+    if (isFirstTime) {
+      init();
+      isFirstTime = false;
+    }
+    // if one chunk size is equal to list size then write the data to file
+    checkAndWriteDictionaryChunkToFile();
+    oneDictionaryChunkList.add(ByteBuffer.wrap(value));
+    totalRecordCount++;
+  }
+
+  /**
+   * This method will write the data in thrift format to disk. This method will not be guided by
+   * parameter dictionary_one_chunk_size and complete data will be written as one chunk
+   *
+   * @param valueList list of byte array. Each byte array is unique dictionary value
+   * @throws IOException if an I/O error occurs
+   */
+  @Override public void write(List<byte[]> valueList) throws IOException {
+    if (isFirstTime) {
+      init();
+      isFirstTime = false;
+    }
+    for (byte[] value : valueList) {
+      oneDictionaryChunkList.add(ByteBuffer.wrap(value));
+      totalRecordCount++;
+    }
+  }
+
+  /**
+   * write dictionary metadata file and close thrift object
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  @Override public void close() throws IOException {
+    if (null != dictionaryThriftWriter) {
+      writeDictionaryFile();
+      // close the thrift writer for dictionary file
+      closeThriftWriter();
+    }
+  }
+
+  /**
+   * check if the threshold has been reached for the number of
+   * values that can be kept in memory and, if so, flush the data to file
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  private void checkAndWriteDictionaryChunkToFile() throws IOException {
+    if (oneDictionaryChunkList.size() >= dictionary_one_chunk_size) {
+      writeDictionaryFile();
+      createChunkList();
+    }
+  }
+
+  /**
+   * This method will serialize the object of dictionary file
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  private void writeDictionaryFile() throws IOException {
+    ColumnDictionaryChunk columnDictionaryChunk = new ColumnDictionaryChunk();
+    columnDictionaryChunk.setValues(oneDictionaryChunkList);
+    writeThriftObject(columnDictionaryChunk);
+  }
+
+  /**
+   * This method will check and create the directory path where the dictionary file has to be created
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  private void init() throws IOException {
+    initDictionaryChunkSize();
+    initPaths();
+    boolean dictFileExists = CarbonUtil.isFileExists(this.dictionaryFilePath);
+    if (dictFileExists && CarbonUtil.isFileExists(this.dictionaryMetaFilePath)) {
+      this.chunk_start_offset = CarbonUtil.getFileSize(this.dictionaryFilePath);
+      validateDictionaryFileOffsetWithLastSegmentEntryOffset();
+    } else if (dictFileExists) {
+      FileFactory.getCarbonFile(dictionaryFilePath, FileFactory.getFileType(dictionaryFilePath))
+          .delete();
+    }
+    openThriftWriter(this.dictionaryFilePath);
+    createChunkList();
+  }
+
+  protected void initPaths() {
+    PathService pathService = CarbonCommonFactory.getPathService();
+    CarbonTablePath carbonTablePath = pathService.getCarbonTablePath(columnIdentifier,
+            this.hdfsStorePath, carbonTableIdentifier);
+    this.dictionaryFilePath = carbonTablePath.getDictionaryFilePath(columnIdentifier.getColumnId());
+    this.dictionaryMetaFilePath =
+        carbonTablePath.getDictionaryMetaFilePath(columnIdentifier.getColumnId());
+  }
+
+  /**
+   * initialize the number of dictionary values that can be kept in memory at a time
+   */
+  private void initDictionaryChunkSize() {
+    try {
+      dictionary_one_chunk_size = Integer.parseInt(CarbonProperties.getInstance()
+          .getProperty(CarbonCommonConstants.DICTIONARY_ONE_CHUNK_SIZE,
+              CarbonCommonConstants.DICTIONARY_ONE_CHUNK_SIZE_DEFAULT));
+    } catch (NumberFormatException e) {
+      dictionary_one_chunk_size =
+          Integer.parseInt(CarbonCommonConstants.DICTIONARY_ONE_CHUNK_SIZE_DEFAULT);
+      LOGGER.error("Dictionary chunk size not configured properly. Taking default size "
+              + dictionary_one_chunk_size);
+    }
+  }
+
+  /**
+   * initialise the list for one dictionary chunk and increment the chunk count
+   */
+  private void createChunkList() {
+    this.oneDictionaryChunkList = new ArrayList<ByteBuffer>(dictionary_one_chunk_size);
+    chunk_count++;
+  }
+
+  /**
+   * if the file already exists then read the metadata file and
+   * validate the last entry's end offset against the file size. If
+   * they are not equal, some invalid data is present which needs
+   * to be truncated
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  private void validateDictionaryFileOffsetWithLastSegmentEntryOffset() throws IOException {
+    // read last dictionary chunk meta entry from dictionary metadata file
+    chunkMetaObjectForLastSegmentEntry = getChunkMetaObjectForLastSegmentEntry();
+    int bytesToTruncate =
+        (int) (chunk_start_offset - chunkMetaObjectForLastSegmentEntry.getEnd_offset());
+    if (bytesToTruncate > 0) {
+      LOGGER.info("some inconsistency in dictionary file for column " + this.columnIdentifier);
+      // truncate the dictionary data till chunk meta end offset
+      FileFactory.FileType fileType = FileFactory.getFileType(this.dictionaryFilePath);
+      CarbonFile carbonFile = FileFactory.getCarbonFile(this.dictionaryFilePath, fileType);
+      boolean truncateSuccess = carbonFile
+          .truncate(this.dictionaryFilePath, chunkMetaObjectForLastSegmentEntry.getEnd_offset());
+      if (!truncateSuccess) {
+        LOGGER.info("Dictionary file not truncated successfully for column " + this.columnIdentifier);
+      }
+    }
+  }
+
+  /**
+   * This method will write the dictionary metadata file for a given column
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  private void writeDictionaryMetadataFile() throws IOException {
+    // Format of dictionary metadata file
+    // min, max, start offset, end offset and chunk count
+    int min_surrogate_key = 0;
+    int max_surrogate_key = 0;
+    // case 1: first time dictionary writing
+    // previousMax = 0, totalRecordCount = 5, min = 1, max= 5
+    // case2: file already exists
+    // previousMax = 5, totalRecordCount = 10, min = 6, max = 15
+    // case 3: no unique values, total records 0
+    // previousMax = 15, totalRecordCount = 0, min = 15, max = 15
+    // both min and max equal to previous max
+    if (null != chunkMetaObjectForLastSegmentEntry) {
+      if (0 == totalRecordCount) {
+        min_surrogate_key = chunkMetaObjectForLastSegmentEntry.getMax_surrogate_key();
+      } else {
+        min_surrogate_key = chunkMetaObjectForLastSegmentEntry.getMax_surrogate_key() + 1;
+      }
+      max_surrogate_key =
+          chunkMetaObjectForLastSegmentEntry.getMax_surrogate_key() + totalRecordCount;
+    } else {
+      if (totalRecordCount > 0) {
+        min_surrogate_key = 1;
+      }
+      max_surrogate_key = totalRecordCount;
+    }
+    ColumnDictionaryChunkMeta dictionaryChunkMeta =
+        new ColumnDictionaryChunkMeta(min_surrogate_key, max_surrogate_key, chunk_start_offset,
+            chunk_end_offset, chunk_count);
+    openThriftWriter(this.dictionaryMetaFilePath);
+    // write dictionary metadata file
+    writeThriftObject(dictionaryChunkMeta);
+    closeThriftWriter();
+    LOGGER.info("Dictionary metadata file written successfully for column " + this.columnIdentifier
+            + " at path " + this.dictionaryMetaFilePath);
+  }
+
+  /**
+   * open thrift writer for writing dictionary chunk/meta object
+   *
+   * @param dictionaryFile can be dictionary file name or dictionary metadata file name
+   * @throws IOException if an I/O error occurs
+   */
+  private void openThriftWriter(String dictionaryFile) throws IOException {
+    // create thrift writer instance
+    dictionaryThriftWriter = new ThriftWriter(dictionaryFile, true);
+    // open the file stream
+    dictionaryThriftWriter.open();
+  }
+
+  /**
+   * This method will write the thrift object to a file
+   *
+   * @param dictionaryThriftObject can be dictionary thrift object or dictionary metadata
+   *                               thrift object
+   * @throws IOException if an I/O error occurs
+   */
+  private void writeThriftObject(TBase dictionaryThriftObject) throws IOException {
+    dictionaryThriftWriter.write(dictionaryThriftObject);
+  }
+
+  /**
+   * close dictionary thrift writer
+   */
+  private void closeThriftWriter() {
+    if (null != dictionaryThriftWriter) {
+      dictionaryThriftWriter.close();
+    }
+  }
+
+  /**
+   * This method will read the dictionary chunk metadata thrift object for last entry
+   *
+   * @return last entry of dictionary meta chunk
+   * @throws IOException if an I/O error occurs
+   */
+  private CarbonDictionaryColumnMetaChunk getChunkMetaObjectForLastSegmentEntry()
+      throws IOException {
+    CarbonDictionaryColumnMetaChunk carbonDictionaryColumnMetaChunk = null;
+    CarbonDictionaryMetadataReader columnMetadataReaderImpl = getDictionaryMetadataReader();
+    try {
+      // read the last segment entry for dictionary metadata
+      carbonDictionaryColumnMetaChunk =
+          columnMetadataReaderImpl.readLastEntryOfDictionaryMetaChunk();
+    } finally {
+      // Close metadata reader
+      columnMetadataReaderImpl.close();
+    }
+    return carbonDictionaryColumnMetaChunk;
+  }
+
+  /**
+   * @return dictionary metadata reader instance
+   */
+  protected CarbonDictionaryMetadataReader getDictionaryMetadataReader() {
+    return new CarbonDictionaryMetadataReaderImpl(hdfsStorePath, carbonTableIdentifier,
+        columnIdentifier);
+  }
+
+  @Override public void commit() throws IOException {
+    if (null != dictionaryThriftWriter) {
+      this.chunk_end_offset = CarbonUtil.getFileSize(this.dictionaryFilePath);
+      writeDictionaryMetadataFile();
+    }
+  }
+}

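The surrogate-key bookkeeping in writeDictionaryMetadataFile above reduces to a small piece of arithmetic; a standalone restatement as a hypothetical helper, mirroring the three cases documented in its comments:

    public class SurrogateRangeExample {
      // previousMax is 0 when no metadata entry exists yet, otherwise the stored max
      static int[] surrogateRange(int previousMax, int totalRecordCount) {
        // with no new values, min stays at the previous max, so min == max
        int min = (totalRecordCount == 0) ? previousMax : previousMax + 1;
        int max = previousMax + totalRecordCount;
        return new int[] {min, max};
      }

      public static void main(String[] args) {
        System.out.println(java.util.Arrays.toString(surrogateRange(0, 5)));   // [1, 5]
        System.out.println(java.util.Arrays.toString(surrogateRange(5, 10)));  // [6, 15]
        System.out.println(java.util.Arrays.toString(surrogateRange(15, 0)));  // [15, 15]
      }
    }
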
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/writer/CarbonFooterWriter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/CarbonFooterWriter.java b/core/src/main/java/org/apache/carbondata/core/writer/CarbonFooterWriter.java
new file mode 100644
index 0000000..04d2b97
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/writer/CarbonFooterWriter.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.writer;
+
+import java.io.IOException;
+
+import org.apache.carbondata.format.FileFooter;
+
+/**
+ * Writes metadata block to the fact table file in thrift
+ * format org.apache.carbondata.format.FileFooter
+ */
+public class CarbonFooterWriter {
+
+  // It is version number of this format class.
+  private static int VERSION_NUMBER = 1;
+
+  // Fact file path
+  private String filePath;
+
+  public CarbonFooterWriter(String filePath) {
+    this.filePath = filePath;
+  }
+
+  /**
+   * It writes FileFooter thrift format object to file.
+   *
+   * @param footer          file footer thrift object
+   * @param currentPosition position at which this metadata is going to be written
+   * @throws IOException
+   */
+  public void writeFooter(FileFooter footer, long currentPosition) throws IOException {
+
+    ThriftWriter thriftWriter = openThriftWriter(filePath);
+    footer.setVersion(VERSION_NUMBER);
+    try {
+      thriftWriter.write(footer);
+      thriftWriter.writeOffset(currentPosition);
+    } finally {
+      thriftWriter.close();
+    }
+  }
+
+  /**
+   * open thrift writer for writing the file footer object
+   */
+  private ThriftWriter openThriftWriter(String filePath) throws IOException {
+    // create thrift writer instance
+    ThriftWriter thriftWriter = new ThriftWriter(filePath, true);
+    // open the file stream
+    thriftWriter.open();
+    return thriftWriter;
+  }
+}

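For reference, a minimal sketch of driving the footer writer; the path, footer object and position are placeholders:

    import java.io.IOException;

    import org.apache.carbondata.core.writer.CarbonFooterWriter;
    import org.apache.carbondata.format.FileFooter;

    public class FooterWriterUsage {
      public static void appendFooter(String factFilePath, FileFooter footer,
          long footerStartPosition) throws IOException {
        CarbonFooterWriter writer = new CarbonFooterWriter(factFilePath);
        // writes the thrift footer followed by the 8-byte offset of its start,
        // which lets a reader locate the footer from the end of the file
        writer.writeFooter(footer, footerStartPosition);
      }
    }
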
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/writer/CarbonIndexFileWriter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/CarbonIndexFileWriter.java b/core/src/main/java/org/apache/carbondata/core/writer/CarbonIndexFileWriter.java
new file mode 100644
index 0000000..bf6fc3b
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/writer/CarbonIndexFileWriter.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.writer;
+
+import java.io.IOException;
+
+import org.apache.thrift.TBase;
+
+/**
+ * Writer class which will be used to write the index file
+ */
+public class CarbonIndexFileWriter {
+
+  /**
+   * thrift writer object
+   */
+  private ThriftWriter thriftWriter;
+
+  /**
+   * It writes a thrift object to the index file
+   *
+   * @param indexObject index thrift object to be written
+   * @throws IOException
+   */
+  public void writeThrift(TBase indexObject) throws IOException {
+    thriftWriter.write(indexObject);
+  }
+
+  /**
+   * Below method will be used to open the thrift writer
+   *
+   * @param filePath file path where data need to be written
+   * @throws IOException throws io exception in case of any failure
+   */
+  public void openThriftWriter(String filePath) throws IOException {
+    // create thrift writer instance
+    thriftWriter = new ThriftWriter(filePath, true);
+    // open the file stream
+    thriftWriter.open();
+  }
+
+  /**
+   * Below method will be used to close the thrift object
+   */
+  public void close() {
+    thriftWriter.close();
+  }
+}

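A hedged sketch of the open/write/close sequence; indexObject stands in for the actual thrift index objects, which are not part of this hunk:

    import java.io.IOException;

    import org.apache.carbondata.core.writer.CarbonIndexFileWriter;
    import org.apache.thrift.TBase;

    public class IndexFileWriterUsage {
      public static void writeIndex(String indexFilePath, TBase indexObject) throws IOException {
        CarbonIndexFileWriter writer = new CarbonIndexFileWriter();
        writer.openThriftWriter(indexFilePath); // opens an appending thrift stream
        try {
          writer.writeThrift(indexObject);
        } finally {
          writer.close();
        }
      }
    }
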
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/writer/HierarchyValueWriterForCSV.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/HierarchyValueWriterForCSV.java b/core/src/main/java/org/apache/carbondata/core/writer/HierarchyValueWriterForCSV.java
new file mode 100644
index 0000000..61a89f9
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/writer/HierarchyValueWriterForCSV.java
@@ -0,0 +1,320 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.writer;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+import org.pentaho.di.core.exception.KettleException;
+
+public class HierarchyValueWriterForCSV {
+
+  /**
+   * Comment for <code>LOGGER</code>
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(HierarchyValueWriterForCSV.class.getName());
+  /**
+   * hierarchyName
+   */
+  private String hierarchyName;
+
+  /**
+   * bufferedOutStream
+   */
+  private FileChannel outPutFileChannel;
+
+  /**
+   * storeFolderLocation
+   */
+  private String storeFolderLocation;
+
+  /**
+   * whether the output file channel has been initialized
+   */
+  private boolean intialized;
+
+  /**
+   * counter for the number of files
+   */
+  private int counter;
+
+  /**
+   * byteArrayList
+   */
+  private List<ByteArrayHolder> byteArrayholder =
+      new ArrayList<ByteArrayHolder>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+
+  /**
+   * toflush
+   */
+  private int toflush;
+
+  public HierarchyValueWriterForCSV(String hierarchy, String storeFolderLocation) {
+    this.hierarchyName = hierarchy;
+    this.storeFolderLocation = storeFolderLocation;
+
+    CarbonProperties instance = CarbonProperties.getInstance();
+
+    this.toflush = Integer.parseInt(instance
+        .getProperty(CarbonCommonConstants.SORT_SIZE, CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL));
+
+    int rowSetSize = Integer.parseInt(instance.getProperty(CarbonCommonConstants.GRAPH_ROWSET_SIZE,
+        CarbonCommonConstants.GRAPH_ROWSET_SIZE_DEFAULT));
+
+    if (this.toflush > rowSetSize) {
+      this.toflush = rowSetSize;
+    }
+
+    updateCounter(hierarchy, storeFolderLocation);
+  }
+
+  /**
+   * @return Returns the byteArrayList.
+   */
+  public List<ByteArrayHolder> getByteArrayList() throws KettleException {
+    return byteArrayholder;
+  }
+
+  public FileChannel getBufferedOutStream() {
+    return outPutFileChannel;
+  }
+
+  private void updateCounter(final String meString, String storeFolderLocation) {
+    File storeFolder = new File(storeFolderLocation);
+
+    File[] listFiles = storeFolder.listFiles(new FileFilter() {
+      @Override public boolean accept(File file) {
+        return file.getName().indexOf(meString) > -1;
+      }
+    });
+
+    if (null == listFiles || listFiles.length == 0) {
+      counter = 0;
+      return;
+    }
+
+    for (File hierFile : listFiles) {
+      String hierFileName = hierFile.getName();
+
+      if (hierFileName.endsWith(CarbonCommonConstants.FILE_INPROGRESS_STATUS)) {
+        hierFileName = hierFileName.substring(0, hierFileName.lastIndexOf('.'));
+        try {
+          counter = Integer.parseInt(hierFileName.substring(hierFileName.length() - 1));
+        } catch (NumberFormatException nfe) {
+
+          if (new File(hierFileName + '0' + CarbonCommonConstants.LEVEL_FILE_EXTENSION).exists()) {
+            // Need to skip because the server may have gone down while files were
+            // being merged and the other hierarchy files were not deleted, leaving
+            // the current file in progress; the files will be merged again and
+            // renamed to a normal file
+            LOGGER.info("Need to skip as this can be a case in which the hierarchy file was"
+                + " already renamed");
+            if (hierFile.delete()) {
+              LOGGER.info("Deleted the Inprogress hierarchy Files.");
+            }
+          } else {
+            // levelfileName0.level does not exist, which means the files were merged
+            // and the other files got deleted, but the server was restarted/killed
+            // while renaming this file from in-progress to normal. so rename it now.
+
+            File inprogressFile = new File(storeFolder + File.separator + hierFile.getName());
+            File changetoName = new File(storeFolder + File.separator + hierFileName);
+
+            if (inprogressFile.renameTo(changetoName)) {
+              LOGGER.info(
+                  "Renaming the level Files while creating the new instance on server startup.");
+            }
+
+          }
+
+        }
+      }
+
+      String val = hierFileName.substring(hierFileName.length() - 1);
+
+      int parsedVal = getIntValue(val);
+
+      if (counter < parsedVal) {
+        counter = parsedVal;
+      }
+    }
+    counter++;
+  }
+
+  private int getIntValue(String val) {
+    int parsedVal = 0;
+    try {
+      parsedVal = Integer.parseInt(val);
+    } catch (NumberFormatException nfe) {
+      LOGGER.info("Hierarchy file is already renamed so there will not be"
+              + " any need to keep the counter");
+    }
+    return parsedVal;
+  }
+
+  private void intialize() throws KettleException {
+    intialized = true;
+
+    File f = new File(storeFolderLocation + File.separator + hierarchyName + counter
+        + CarbonCommonConstants.FILE_INPROGRESS_STATUS);
+
+    counter++;
+
+    FileOutputStream fos = null;
+
+    boolean isFileCreated = false;
+    if (!f.exists()) {
+      try {
+        isFileCreated = f.createNewFile();
+
+      } catch (IOException e) {
+        //not required: findbugs fix
+        throw new KettleException("unable to create member mapping file", e);
+      }
+      if (!isFileCreated) {
+        throw new KettleException("unable to create file" + f.getAbsolutePath());
+      }
+    }
+
+    try {
+      fos = new FileOutputStream(f);
+
+      outPutFileChannel = fos.getChannel();
+    } catch (FileNotFoundException e) {
+      closeStreamAndDeleteFile(f, outPutFileChannel, fos);
+      throw new KettleException("member Mapping File not found to write mapping info", e);
+    }
+  }
+
+  public void writeIntoHierarchyFile(byte[] bytes, int primaryKey) throws KettleException {
+    if (!intialized) {
+      intialize();
+    }
+
+    ByteBuffer byteBuffer = storeValueInCache(bytes, primaryKey);
+
+    try {
+      byteBuffer.flip();
+      outPutFileChannel.write(byteBuffer);
+    } catch (IOException e) {
+      throw new KettleException("Error while writing in the hierarchy mapping file", e);
+    }
+  }
+
+  private ByteBuffer storeValueInCache(byte[] bytes, int primaryKey) {
+
+    // allocate 4 extra bytes to append the primary key after the mdkey bytes
+    ByteBuffer buffer = ByteBuffer.allocate(bytes.length + 4);
+
+    buffer.put(bytes);
+    buffer.putInt(primaryKey);
+
+    return buffer;
+  }
+
+  public void performRequiredOperation() throws KettleException {
+    if (byteArrayholder.size() == 0) {
+      return;
+    }
+    //write to the file and close the stream.
+    Collections.sort(byteArrayholder);
+
+    for (ByteArrayHolder byteArray : byteArrayholder) {
+      writeIntoHierarchyFile(byteArray.getMdKey(), byteArray.getPrimaryKey());
+    }
+
+    CarbonUtil.closeStreams(outPutFileChannel);
+
+    //rename the inprogress file to normal .level file
+    String filePath = this.storeFolderLocation + File.separator + hierarchyName + (counter - 1)
+        + CarbonCommonConstants.FILE_INPROGRESS_STATUS;
+    File inProgressFile = new File(filePath);
+    String inprogressFileName = inProgressFile.getName();
+
+    String changedFileName = inprogressFileName.substring(0, inprogressFileName.lastIndexOf('.'));
+
+    File orgFinalName = new File(this.storeFolderLocation + File.separator + changedFileName);
+
+    if (!inProgressFile.renameTo(orgFinalName)) {
+      LOGGER.error("Not able to rename file : " + inprogressFileName);
+    }
+
+    //create the new outputStream
+    try {
+      intialize();
+    } catch (KettleException e) {
+      LOGGER.error("Not able to create output stream for file:" + hierarchyName + (counter - 1));
+    }
+
+    //clear the byte array holder also.
+    byteArrayholder.clear();
+  }
+
+  private void closeStreamAndDeleteFile(File f, Closeable... streams) throws KettleException {
+    boolean isDeleted = false;
+    for (Closeable stream : streams) {
+      if (null != stream) {
+        try {
+          stream.close();
+        } catch (IOException e) {
+          LOGGER.error(e, "unable to close the stream ");
+        }
+
+      }
+    }
+
+    // delete the file
+    isDeleted = f.delete();
+    if (!isDeleted) {
+      LOGGER.error("Unable to delete the file " + f.getAbsolutePath());
+    }
+
+  }
+
+  public String getHierarchyName() {
+    return hierarchyName;
+  }
+
+  public int getCounter() {
+    return counter;
+  }
+
+}
+

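The record layout produced by storeValueInCache above is simply the mdkey bytes followed by the 4-byte primary key; a minimal restatement as a hypothetical helper:

    import java.nio.ByteBuffer;

    public class HierarchyRecordLayout {
      // an mdkey of k bytes yields a k + 4 byte record: the mdkey first,
      // then the big-endian primary key appended at the end
      static ByteBuffer encodeRecord(byte[] mdKey, int primaryKey) {
        ByteBuffer buffer = ByteBuffer.allocate(mdKey.length + 4);
        buffer.put(mdKey);
        buffer.putInt(primaryKey);
        buffer.flip(); // ready to hand to FileChannel.write
        return buffer;
      }
    }
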
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/writer/ThriftWriter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/ThriftWriter.java b/core/src/main/java/org/apache/carbondata/core/writer/ThriftWriter.java
new file mode 100644
index 0000000..c232fb2
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/writer/ThriftWriter.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.writer;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.thrift.TBase;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.transport.TIOStreamTransport;
+
+/**
+ * Simple class that makes it easy to write Thrift objects to disk.
+ */
+public class ThriftWriter {
+
+  /**
+   * buffer size
+   */
+  private static final int bufferSize = 2048;
+
+  /**
+   * File to write to.
+   */
+  private String fileName;
+
+  /**
+   * For writing to the file.
+   */
+  private DataOutputStream dataOutputStream;
+
+  /**
+   * For binary serialization of objects.
+   */
+  private TProtocol binaryOut;
+
+  /**
+   * flag to append to existing file
+   */
+  private boolean append;
+
+  /**
+   * Constructor.
+   */
+  public ThriftWriter(String fileName, boolean append) {
+    this.fileName = fileName;
+    this.append = append;
+  }
+
+  /**
+   * Open the file for writing.
+   */
+  public void open() throws IOException {
+    FileFactory.FileType fileType = FileFactory.getFileType(fileName);
+    dataOutputStream = FileFactory.getDataOutputStream(fileName, fileType, bufferSize, append);
+    binaryOut = new TCompactProtocol(new TIOStreamTransport(dataOutputStream));
+  }
+
+  /**
+   * Write the object to disk.
+   */
+  public void write(TBase t) throws IOException {
+    try {
+      t.write(binaryOut);
+      dataOutputStream.flush();
+    } catch (TException e) {
+      throw new IOException(e);
+    }
+  }
+
+  /**
+   * Write the offset to the file
+   *
+   * @param offset
+   * @throws IOException
+   */
+  public void writeOffset(long offset) throws IOException {
+    dataOutputStream.writeLong(offset);
+  }
+
+  /**
+   * Close the file stream.
+   */
+  public void close() {
+    CarbonUtil.closeStreams(dataOutputStream);
+  }
+
+  /**
+   * Flush data to HDFS file
+   */
+  public void sync() throws IOException {
+    if (dataOutputStream instanceof FSDataOutputStream) {
+      ((FSDataOutputStream) dataOutputStream).hsync();
+    }
+  }
+}

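For reference, a minimal usage sketch: the writer appends compact-protocol thrift objects to a file. The path and object are placeholders:

    import java.io.IOException;

    import org.apache.carbondata.core.writer.ThriftWriter;
    import org.apache.thrift.TBase;

    public class ThriftWriterUsage {
      public static void append(String filePath, TBase object) throws IOException {
        ThriftWriter writer = new ThriftWriter(filePath, true); // true = append mode
        writer.open();
        try {
          writer.write(object); // serializes the object and flushes the stream
        } finally {
          writer.close();
        }
      }
    }
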
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/writer/exception/CarbonDataWriterException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/exception/CarbonDataWriterException.java b/core/src/main/java/org/apache/carbondata/core/writer/exception/CarbonDataWriterException.java
new file mode 100644
index 0000000..2d978fb
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/writer/exception/CarbonDataWriterException.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.writer.exception;
+
+import java.util.Locale;
+
+public class CarbonDataWriterException extends Exception {
+
+  /**
+   * default serial version ID.
+   */
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * The Error message.
+   */
+  private String msg = "";
+
+  /**
+   * Constructor
+   *
+   * @param msg The error message for this exception.
+   */
+  public CarbonDataWriterException(String msg) {
+    super(msg);
+    this.msg = msg;
+  }
+
+  /**
+   * Constructor
+   *
+   * @param msg The error message for this exception.
+   */
+  public CarbonDataWriterException(String msg, Throwable t) {
+    super(msg, t);
+    this.msg = msg;
+  }
+
+  /**
+   * getLocalizedMessage
+   */
+  @Override public String getLocalizedMessage() {
+    return super.getLocalizedMessage();
+  }
+
+  /**
+   * getMessage
+   */
+  public String getMessage() {
+    return this.msg;
+  }
+
+  /**
+   * This method is used to get the localized message.
+   *
+   * @param locale - A Locale object represents a specific geographical,
+   *               political, or cultural region.
+   * @return - Localized error message.
+   */
+  public String getLocalizedMessage(Locale locale) {
+    return "";
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriter.java b/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriter.java
new file mode 100644
index 0000000..e9d7b1d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriter.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.writer.sortindex;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Interface for writing the dictionary sort index and inverted sort index data.
+ */
+public interface CarbonDictionarySortIndexWriter extends Closeable {
+
+  /**
+   * The method is used to write the dictionary sortIndex data to the column's
+   * sortedIndex file in thrift format.
+   *
+   * @param sortIndexList list of sortIndex
+   * @throws IOException in case any I/O error occurs.
+   */
+  public void writeSortIndex(List<Integer> sortIndexList) throws IOException;
+
+  /**
+   * The method is used to write the dictionary sortIndexInverted data to the
+   * column's sortedIndex file in thrift format.
+   *
+   * @param invertedSortIndexList list of sortIndexInverted
+   * @throws IOException in case any I/O error occurs.
+   */
+  public void writeInvertedSortIndex(List<Integer> invertedSortIndexList) throws IOException;
+
+}

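The exact encoding of the two lists is not spelled out in this hunk; the sketch below assumes the usual convention that sort_index lists surrogate keys in dictionary-sorted order and sort_index_inverted maps each surrogate key (1-based) back to its sorted position. The dictionary values are hypothetical:

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.carbondata.core.writer.sortindex.CarbonDictionarySortIndexWriter;

    public class SortIndexExample {
      // Hypothetical dictionary: surrogate 1 -> "banana", 2 -> "cherry", 3 -> "apple".
      // Sorted by value ("apple", "banana", "cherry") the surrogates read 3, 1, 2.
      public static void writeBoth(CarbonDictionarySortIndexWriter writer) throws IOException {
        List<Integer> sortIndex = Arrays.asList(3, 1, 2);          // surrogates in sorted order
        List<Integer> invertedSortIndex = Arrays.asList(2, 3, 1);  // sorted position per surrogate
        writer.writeSortIndex(sortIndex);
        writer.writeInvertedSortIndex(invertedSortIndex);
        writer.close(); // the implementation persists the ColumnSortInfo at close time
      }
    }
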
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriterImpl.java b/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriterImpl.java
new file mode 100644
index 0000000..b6df97d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/writer/sortindex/CarbonDictionarySortIndexWriterImpl.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.writer.sortindex;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
+
+import org.apache.carbondata.common.factory.CarbonCommonFactory;
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
+import org.apache.carbondata.core.carbon.ColumnIdentifier;
+import org.apache.carbondata.core.carbon.path.CarbonTablePath;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFile;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+import org.apache.carbondata.core.service.PathService;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.writer.ThriftWriter;
+import org.apache.carbondata.format.ColumnSortInfo;
+
+/**
+ * The class responsible for writing the dictionary/column sort index and sort index inverted data
+ * in the thrift format
+ */
+public class CarbonDictionarySortIndexWriterImpl implements CarbonDictionarySortIndexWriter {
+
+  /**
+   * carbonTable Identifier holding the info of databaseName and tableName
+   */
+  protected CarbonTableIdentifier carbonTableIdentifier;
+
+  /**
+   * column name
+   */
+  protected ColumnIdentifier columnIdentifier;
+
+  /**
+   * carbon store location
+   */
+  protected String carbonStorePath;
+  /**
+   * Path of dictionary sort index file for which the sortIndex to be written
+   */
+  protected String sortIndexFilePath;
+  /**
+   * Instance of thrift writer to write the data
+   */
+  private ThriftWriter sortIndexThriftWriter;
+
+  /**
+   * Column sort info thrift instance.
+   */
+  private ColumnSortInfo columnSortInfo = new ColumnSortInfo();
+
+  /**
+   * Comment for <code>LOGGER</code>
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(CarbonDictionarySortIndexWriterImpl.class.getName());
+
+  /**
+   * @param carbonStorePath       Carbon store path
+   * @param carbonTableIdentifier table identifier which will give table name and database name
+   * @param columnIdentifier      column unique identifier
+   */
+  public CarbonDictionarySortIndexWriterImpl(final CarbonTableIdentifier carbonTableIdentifier,
+      final ColumnIdentifier columnIdentifier, final String carbonStorePath) {
+    this.carbonTableIdentifier = carbonTableIdentifier;
+    this.columnIdentifier = columnIdentifier;
+    this.carbonStorePath = carbonStorePath;
+  }
+
+  /**
+   * The method is used to populate the dictionary sortIndex data to columnSortInfo
+   * in thrift format.
+   *
+   * @param sortIndexList list of sortIndex
+   * @throws IOException in case any I/O error occurs.
+   */
+  @Override public void writeSortIndex(List<Integer> sortIndexList) throws IOException {
+    columnSortInfo.setSort_index(sortIndexList);
+  }
+
+  /**
+   * The method is used to populate the dictionary inverted sortIndex data to
+   * columnSortInfo in thrift format.
+   *
+   * @param invertedSortIndexList list of sortIndexInverted
+   * @throws IOException in case any I/O error occurs.
+   */
+  @Override public void writeInvertedSortIndex(List<Integer> invertedSortIndexList)
+      throws IOException {
+    columnSortInfo.setSort_index_inverted(invertedSortIndexList);
+  }
+
+  /**
+   * Initialize the sortIndexFilePath and open the writing stream
+   * for the dictionary sortIndex file thrift writer.
+   * The column sort info is written to the store only when both the sort index
+   * and the inverted sort index are populated; an existing sort index file is
+   * overwritten with the new sort index data. A columnSortInfo with a null
+   * sortIndex or invertedSortIndex will not be written.
+   */
+  private void writeColumnSortInfo() throws IOException {
+    boolean isNotNull =
+        null != columnSortInfo.getSort_index() && null != columnSortInfo.sort_index_inverted;
+    if (isNotNull) {
+      initPath();
+      String folderContainingFile = CarbonTablePath.getFolderContainingFile(this.sortIndexFilePath);
+      boolean created = CarbonUtil.checkAndCreateFolder(folderContainingFile);
+      if (!created) {
+        LOGGER.error("Database metadata folder creation status :: " + created);
+        throw new IOException("Failed to create database metadata folder");
+      }
+      try {
+
+        this.sortIndexThriftWriter = new ThriftWriter(this.sortIndexFilePath, false);
+        this.sortIndexThriftWriter.open();
+        sortIndexThriftWriter.write(columnSortInfo);
+      } catch (IOException ie) {
+        LOGGER.error(ie,
+            "problem while writing the dictionary sort index file.");
+        throw new IOException("problem while writing the dictionary sort index file.", ie);
+      } finally {
+        if (null != sortIndexThriftWriter) {
+          this.sortIndexThriftWriter.close();
+        }
+        this.sortIndexFilePath = null;
+      }
+    }
+  }
+
+  protected void initPath() {
+    PathService pathService = CarbonCommonFactory.getPathService();
+    CarbonTablePath carbonTablePath = pathService
+        .getCarbonTablePath(columnIdentifier, carbonStorePath, carbonTableIdentifier);
+    String dictionaryPath = carbonTablePath.getDictionaryFilePath(columnIdentifier.getColumnId());
+    long dictOffset = CarbonUtil.getFileSize(dictionaryPath);
+    this.sortIndexFilePath =
+        carbonTablePath.getSortIndexFilePath(columnIdentifier.getColumnId(), dictOffset);
+    cleanUpOldSortIndex(carbonTablePath, dictionaryPath);
+  }
+
+  /**
+   * Cleans up old unused sort index files
+   *
+   * @param carbonTablePath carbon table path
+   * @param dictPath        dictionary file path
+   */
+  protected void cleanUpOldSortIndex(CarbonTablePath carbonTablePath, String dictPath) {
+    CarbonFile dictFile =
+        FileFactory.getCarbonFile(dictPath, FileFactory.getFileType(dictPath));
+    CarbonFile[] files =
+        carbonTablePath.getSortIndexFiles(dictFile.getParentFile(),
+            columnIdentifier.getColumnId());
+    int maxTime;
+    try {
+      maxTime = Integer.parseInt(CarbonProperties.getInstance()
+          .getProperty(CarbonCommonConstants.MAX_QUERY_EXECUTION_TIME));
+    } catch (NumberFormatException e) {
+      maxTime = CarbonCommonConstants.DEFAULT_MAX_QUERY_EXECUTION_TIME;
+    }
+    if (null != files) {
+      Arrays.sort(files, new Comparator<CarbonFile>() {
+        @Override public int compare(CarbonFile o1, CarbonFile o2) {
+          return o1.getName().compareTo(o2.getName());
+        }
+      });
+      for (int i = 0; i < files.length - 1; i++) {
+        long difference = System.currentTimeMillis() - files[i].getLastModifiedTime();
+        long minutesElapsed = (difference / (1000 * 60));
+        if (minutesElapsed > maxTime) {
+          if (!files[i].delete()) {
+            LOGGER.warn("Failed to delete sort index file: " + files[i].getAbsolutePath());
+          } else {
+            LOGGER.info("Sort index file deleted: " + files[i].getAbsolutePath());
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Closes this stream and releases any system resources associated
+   * with it. If the stream is already closed then invoking this
+   * method has no effect.
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  @Override public void close() throws IOException {
+    writeColumnSortInfo();
+    if (null != sortIndexThriftWriter) {
+      sortIndexThriftWriter.close();
+    }
+  }
+}
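
For reference, a minimal usage sketch of the writer above, assuming valid table and
column identifiers are already at hand (the helper method and variable names are
illustrative, not part of the patch; CarbonData imports are omitted):

    // Sketch only: shows the intended call sequence for the writer above.
    static void writeSortIndexFile(CarbonTableIdentifier tableIdentifier,
        ColumnIdentifier columnIdentifier, String storePath,
        List<Integer> sortIndex, List<Integer> invertedSortIndex) throws IOException {
      CarbonDictionarySortIndexWriterImpl writer =
          new CarbonDictionarySortIndexWriterImpl(tableIdentifier, columnIdentifier, storePath);
      try {
        // both lists must be populated: writeColumnSortInfo() skips writing
        // when either sort_index or sort_index_inverted is null
        writer.writeSortIndex(sortIndex);
        writer.writeInvertedSortIndex(invertedSortIndex);
      } finally {
        writer.close(); // close() performs the actual thrift write
      }
    }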



[45/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeNonLeafNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeNonLeafNode.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeNonLeafNode.java
new file mode 100644
index 0000000..cfbe06d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeNonLeafNode.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.impl.btree;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.DataRefNode;
+import org.apache.carbondata.core.carbon.datastore.IndexKey;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+
+/**
+ * Non-leaf node of a b+tree which keeps the metadata (start key) of the
+ * leaf nodes under it
+ */
+public class BTreeNonLeafNode implements BTreeNode {
+
+  /**
+   * Child nodes
+   */
+  private BTreeNode[] children;
+
+  /**
+   * list of keys in non leaf
+   */
+  private List<IndexKey> listOfKeys;
+
+  public BTreeNonLeafNode() {
+    // creating a list which will store all the indexes
+    listOfKeys = new ArrayList<IndexKey>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+  }
+
+  /**
+   * below method will return the keys of this node
+   *
+   * @return complete list of this node's keys
+   */
+  @Override public IndexKey[] getNodeKeys() {
+    return listOfKeys.toArray(new IndexKey[listOfKeys.size()]);
+  }
+
+  /**
+   * as it is a non leaf node it will have references to all the leaf nodes
+   * under it; sets all the children
+   *
+   * @param children leaf nodes under this node
+   */
+  @Override public void setChildren(BTreeNode[] children) {
+    this.children = children;
+  }
+
+  /**
+   * setting the next node
+   */
+  @Override public void setNextNode(BTreeNode nextNode) {
+    // not required in case of a non leaf node
+  }
+
+  /**
+   * get the child node at the given index
+   *
+   * @return child node
+   */
+  @Override public BTreeNode getChild(int index) {
+    return this.children[index];
+  }
+
+  /**
+   * add the start key of a leaf node
+   *
+   * @param key leaf node start key
+   */
+  @Override public void setKey(IndexKey key) {
+    listOfKeys.add(key);
+
+  }
+
+  /**
+   * @return whether it is a leaf node or not
+   */
+  @Override public boolean isLeafNode() {
+    return false;
+  }
+
+  /**
+   * Method to get the next block; this can be used while scanning, when an
+   * iterator over this class is used to iterate over blocks
+   *
+   * @return next block
+   */
+  @Override public DataRefNode getNextDataRefNode() {
+    throw new UnsupportedOperationException("Unsupported operation");
+  }
+
+  /**
+   * to get the number of keys tuples present in the block
+   *
+   * @return number of keys in the block
+   */
+  @Override public int nodeSize() {
+    return listOfKeys.size();
+  }
+
+  /**
+   * Method can be used to get the block index. This is useful when multiple
+   * threads scan a group of blocks, so that some of the blocks can be
+   * assigned to one thread and some to another
+   *
+   * @return block number
+   */
+  @Override public long nodeNumber() {
+    throw new UnsupportedOperationException("Unsupported operation");
+  }
+
+  /**
+   * This method will be used to get the max value of all the columns; this can
+   * be used in case of a filter query
+   *
+   * @return max value of all the columns
+   */
+  @Override public byte[][] getColumnsMaxValue() {
+    // getting the max value is not supported as this is a non leaf node;
+    // in a B+Tree, data is stored only in the leaf nodes and intermediate
+    // nodes are used only for searching the leaf node
+    throw new UnsupportedOperationException("Unsupported operation");
+  }
+
+  /**
+   * This method will be used to get the min value of all the columns; this can
+   * be used in case of a filter query
+   *
+   * @return min value of all the columns
+   */
+  @Override public byte[][] getColumnsMinValue() {
+    // getting the min value is not supported as this is a non leaf node;
+    // in a B+Tree, data is stored only in the leaf nodes and intermediate
+    // nodes are used only for searching the leaf node
+    throw new UnsupportedOperationException("Unsupported operation");
+  }
+
+  /**
+   * Below method will be used to get the dimension chunks
+   *
+   * @param fileReader   file reader to read the chunks from file
+   * @param blockIndexes indexes of the blocks need to be read
+   * @return dimension data chunks
+   */
+  @Override public DimensionColumnDataChunk[] getDimensionChunks(FileHolder fileReader,
+      int[] blockIndexes) {
+
+    // getting the dimension chunks is not supported as this is a non leaf node;
+    // in a B+Tree, data is stored only in the leaf nodes and intermediate
+    // nodes are used only for searching the leaf node
+    throw new UnsupportedOperationException("Unsupported operation");
+  }
+
+  /**
+   * Below method will be used to get the dimension chunk
+   *
+   * @param fileReader file reader to read the chunk from file
+   * @param blockIndex block index to be read
+   * @return dimension data chunk
+   */
+  @Override public DimensionColumnDataChunk getDimensionChunk(FileHolder fileReader,
+      int blockIndex) {
+    // getting the dimension chunk is not supported as this is a non leaf node;
+    // in a B+Tree, data is stored only in the leaf nodes and intermediate
+    // nodes are used only for searching the leaf node
+    throw new UnsupportedOperationException("Unsupported operation");
+  }
+
+  /**
+   * Below method will be used to get the measure chunks
+   *
+   * @param fileReader   file reader to read the chunk from file
+   * @param blockIndexes block indexes to be read from file
+   * @return measure column data chunk
+   */
+  @Override public MeasureColumnDataChunk[] getMeasureChunks(FileHolder fileReader,
+      int[] blockIndexes) {
+    // getting the measure chunks is not supported as this is a non leaf node;
+    // in a B+Tree, data is stored only in the leaf nodes and intermediate
+    // nodes are used only for searching the leaf node
+    throw new UnsupportedOperationException("Unsupported operation");
+  }
+
+  /**
+   * Below method will be used to read the measure chunk
+   *
+   * @param fileReader file reader to read the file chunk
+   * @param blockIndex block index to be read from file
+   * @return measure data chunk
+   */
+  @Override public MeasureColumnDataChunk getMeasureChunk(FileHolder fileReader, int blockIndex) {
+    // getting the measure chunk is not supported as this is a non leaf node;
+    // in a B+Tree, data is stored only in the leaf nodes and intermediate
+    // nodes are used only for searching the leaf node
+    throw new UnsupportedOperationException("Unsupported operation");
+  }
+}
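
To make the division of labor concrete: a non-leaf node only supports navigation, so
a lookup descends via getChild() until isLeafNode() is true. A shape sketch of that
descent (findChildIndex is a hypothetical key-comparison helper over the node keys,
not part of this patch):

    // Descend from the root to the leaf that may contain searchKey.
    static BTreeNode findLeaf(BTreeNode root, IndexKey searchKey) {
      BTreeNode node = root;
      while (!node.isLeafNode()) {
        // pick the child whose key range covers searchKey;
        // findChildIndex is a hypothetical comparator over node.getNodeKeys()
        int childIndex = findChildIndex(node.getNodeKeys(), searchKey);
        node = node.getChild(childIndex);
      }
      return node; // data and min/max queries are only valid from here on
    }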

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockBTreeBuilder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockBTreeBuilder.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockBTreeBuilder.java
new file mode 100644
index 0000000..3828818
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockBTreeBuilder.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.impl.btree;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.datastore.BTreeBuilderInfo;
+import org.apache.carbondata.core.carbon.datastore.IndexKey;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+/**
+ * Below class will be used to build the btree; the btree will be built over all
+ * the blocks of a segment
+ */
+public class BlockBTreeBuilder extends AbstractBTreeBuilder {
+
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(BlockBTreeBuilder.class.getName());
+
+  /**
+   * Below method will be used to build the segment info in b+tree format.
+   * The tree is read only and is built bottom up: first all the leaf nodes
+   * are built and then the intermediate nodes. In our case one leaf node
+   * holds not just one entry but a group of entries
+   */
+  @Override public void build(BTreeBuilderInfo btreeBuilderInfo) {
+    int groupCounter;
+    int nInternal = 0;
+    BTreeNode curNode = null;
+    BTreeNode prevNode = null;
+    List<BTreeNode[]> nodeGroups =
+        new ArrayList<BTreeNode[]>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+    BTreeNode[] currentGroup = null;
+    List<List<IndexKey>> interNSKeyList =
+        new ArrayList<List<IndexKey>>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+    List<IndexKey> leafNSKeyList = null;
+    long nodeNumber = 0;
+    for (int metadataIndex = 0;
+         metadataIndex < btreeBuilderInfo.getFooterList().size(); metadataIndex++) {
+      // creating a leaf node
+      curNode = new BlockBTreeLeafNode(btreeBuilderInfo, metadataIndex, nodeNumber++);
+      nLeaf++;
+      // setting the next node as it is a b+tree,
+      // so all the leaf nodes are chained
+      // and stored as a linked list
+      if (prevNode != null) {
+        prevNode.setNextNode(curNode);
+      }
+      prevNode = curNode;
+      // as an intermediate node will have more than one leaf,
+      // create groups of leaves
+      groupCounter = (nLeaf - 1) % (maxNumberOfEntriesInNonLeafNodes);
+      if (groupCounter == 0) {
+        // Create new node group if current group is full
+        leafNSKeyList = new ArrayList<IndexKey>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
+        currentGroup = new BTreeNode[maxNumberOfEntriesInNonLeafNodes];
+        nodeGroups.add(currentGroup);
+        nInternal++;
+        interNSKeyList.add(leafNSKeyList);
+      }
+      if (null != leafNSKeyList) {
+        leafNSKeyList.add(convertStartKeyToNodeEntry(
+            btreeBuilderInfo.getFooterList().get(metadataIndex).getBlockletIndex()
+                .getBtreeIndex().getStartKey()));
+      }
+      if (null != currentGroup) {
+        currentGroup[groupCounter] = curNode;
+      }
+    }
+    if (nLeaf == 0) {
+      return;
+    }
+    // adding the intermediate nodes
+    addIntermediateNode(curNode, nodeGroups, currentGroup, interNSKeyList, nInternal);
+    LOGGER.info("************************Total number of leaf nodes in BTree: " + nLeaf);
+  }
+}
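
The grouping arithmetic in build() assigns each run of maxNumberOfEntriesInNonLeafNodes
consecutive leaves to one parent group (a new group is opened whenever groupCounter
wraps to 0). A standalone sketch of the index math, with an arbitrary fanout:

    public static void main(String[] args) {
      int fanout = 3; // stands in for maxNumberOfEntriesInNonLeafNodes
      for (int nLeaf = 1; nLeaf <= 7; nLeaf++) {
        int slot = (nLeaf - 1) % fanout;  // groupCounter: position within the parent
        int group = (nLeaf - 1) / fanout; // which parent group the leaf lands in
        System.out.println("leaf " + nLeaf + " -> group " + group + ", slot " + slot);
      }
      // leaves 1..3 fill group 0, leaves 4..6 fill group 1, leaf 7 opens group 2
    }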

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockBTreeLeafNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockBTreeLeafNode.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockBTreeLeafNode.java
new file mode 100644
index 0000000..fcc98c2
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockBTreeLeafNode.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.impl.btree;
+
+import org.apache.carbondata.core.carbon.datastore.BTreeBuilderInfo;
+import org.apache.carbondata.core.carbon.datastore.block.TableBlockInfo;
+import org.apache.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
+import org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletMinMaxIndex;
+
+/**
+ * Leaf node of the block btree where only min max is stored; this can be used
+ * from the driver when we only need to find whether a particular block should
+ * be selected for query execution
+ */
+public class BlockBTreeLeafNode extends AbstractBTreeLeafNode {
+
+  private TableBlockInfo blockInfo;
+
+  /**
+   * Create a leaf node
+   *
+   * @param builderInfos  builder infos which have required metadata to create a leaf
+   *                      node
+   * @param metadataIndex metadata index (index of the footer in the builder infos)
+   * @param nodeNumber    node number of the node
+   */
+  public BlockBTreeLeafNode(BTreeBuilderInfo builderInfos, int metadataIndex, long nodeNumber) {
+    DataFileFooter footer = builderInfos.getFooterList().get(metadataIndex);
+    BlockletMinMaxIndex minMaxIndex = footer.getBlockletIndex().getMinMaxIndex();
+    maxKeyOfColumns = minMaxIndex.getMaxValues();
+    minKeyOfColumns = minMaxIndex.getMinValues();
+    numberOfKeys = 1;
+    this.nodeNumber = nodeNumber;
+    this.blockInfo = footer.getTableBlockInfo();
+  }
+
+  /**
+   * Below method is to get the table block info.
+   * This is used only in case of a BlockBtree leaf node, which is
+   * used from the driver
+   *
+   * @return TableBlockInfo
+   */
+  public TableBlockInfo getTableBlockInfo() {
+    return blockInfo;
+  }
+
+}
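
Since each BlockBTreeLeafNode carries its block's TableBlockInfo and the builder
chains leaves via setNextNode(), the driver can walk the chain to collect candidate
blocks. A sketch, assuming firstLeaf is the left-most leaf of a built tree and that
the leaf implementation returns the chained node from getNextDataRefNode():

    static List<TableBlockInfo> collectBlocks(DataRefNode firstLeaf) {
      List<TableBlockInfo> blocks = new ArrayList<TableBlockInfo>();
      DataRefNode node = firstLeaf;
      while (node != null) {
        blocks.add(((BlockBTreeLeafNode) node).getTableBlockInfo());
        node = node.getNextDataRefNode(); // assumed to follow the setNextNode() chain
      }
      return blocks;
    }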

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeBuilder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeBuilder.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeBuilder.java
new file mode 100644
index 0000000..3d6c11d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeBuilder.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.impl.btree;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.datastore.BTreeBuilderInfo;
+import org.apache.carbondata.core.carbon.datastore.IndexKey;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+
+/**
+ * Btree based builder which will build the leaf node in a b+ tree format
+ */
+public class BlockletBTreeBuilder extends AbstractBTreeBuilder {
+
+  /**
+   * Attribute for Carbon LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(BlockletBTreeBuilder.class.getName());
+
+  /**
+   * Below method will be used to build the segment info in b+tree format.
+   * The tree is read only and is built bottom up: first all the leaf nodes
+   * are built and then the intermediate nodes. In our case one leaf node
+   * holds not just one entry but a group of entries
+   */
+  @Override public void build(BTreeBuilderInfo segmentBuilderInfos) {
+    long totalNumberOfTuple = 0;
+    int groupCounter;
+    int nInternal = 0;
+    BTreeNode curNode = null;
+    BTreeNode prevNode = null;
+    List<BTreeNode[]> nodeGroups =
+        new ArrayList<BTreeNode[]>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    BTreeNode[] currentGroup = null;
+    List<List<IndexKey>> interNSKeyList =
+        new ArrayList<List<IndexKey>>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    List<IndexKey> leafNSKeyList = null;
+    long nodeNumber = 0;
+    for (int index = 0;
+         index < segmentBuilderInfos.getFooterList().get(0).getBlockletList()
+             .size(); index++) {
+      // creating a leaf node
+      curNode = new BlockletBTreeLeafNode(segmentBuilderInfos, index, nodeNumber++);
+      totalNumberOfTuple +=
+          segmentBuilderInfos.getFooterList().get(0).getBlockletList().get(index)
+              .getNumberOfRows();
+      nLeaf++;
+      // setting the next node as it is a b+tree,
+      // so all the leaf nodes are chained
+      // and stored as a linked list
+      if (prevNode != null) {
+        prevNode.setNextNode(curNode);
+      }
+      prevNode = curNode;
+      // as an intermediate node will have more than one leaf,
+      // create groups of leaves
+      groupCounter = (nLeaf - 1) % (maxNumberOfEntriesInNonLeafNodes);
+      if (groupCounter == 0) {
+        // Create new node group if current group is full
+        leafNSKeyList = new ArrayList<IndexKey>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+        currentGroup = new BTreeNode[maxNumberOfEntriesInNonLeafNodes];
+        nodeGroups.add(currentGroup);
+        nInternal++;
+        interNSKeyList.add(leafNSKeyList);
+      }
+      if (null != leafNSKeyList) {
+        leafNSKeyList.add(convertStartKeyToNodeEntry(
+            segmentBuilderInfos.getFooterList().get(0).getBlockletList().get(index)
+                .getBlockletIndex().getBtreeIndex().getStartKey()));
+      }
+      if (null != currentGroup) {
+        currentGroup[groupCounter] = curNode;
+      }
+    }
+    if (totalNumberOfTuple == 0) {
+      return;
+    }
+    // adding the intermediate nodes
+    addIntermediateNode(curNode, nodeGroups, currentGroup, interNSKeyList, nInternal);
+    LOGGER.info("****************************Total Number Rows In BTREE: " + totalNumberOfTuple);
+  }
+
+}
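
Because a new group (and hence a first-level parent) is created once per fanout
leaves, nInternal after the loop is simply the ceiling of nLeaf / fanout. A one-line
check of that invariant, with illustrative values:

    int fanout = 32;                                // maxNumberOfEntriesInNonLeafNodes
    int nLeaf = 100;                                // leaves created by the loop above
    int nInternal = (nLeaf + fanout - 1) / fanout;  // = 4 first-level parent groups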

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeLeafNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeLeafNode.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeLeafNode.java
new file mode 100644
index 0000000..2bbddda
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BlockletBTreeLeafNode.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.datastore.impl.btree;
+
+import org.apache.carbondata.core.carbon.datastore.BTreeBuilderInfo;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.reader.DimensionColumnChunkReader;
+import org.apache.carbondata.core.carbon.datastore.chunk.reader.MeasureColumnChunkReader;
+import org.apache.carbondata.core.carbon.datastore.chunk.reader.dimension.CompressedDimensionChunkFileBasedReader;
+import org.apache.carbondata.core.carbon.datastore.chunk.reader.measure.CompressedMeasureChunkFileBasedReader;
+import org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletMinMaxIndex;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+/**
+ * Leaf node class of a Blocklet btree
+ */
+public class BlockletBTreeLeafNode extends AbstractBTreeLeafNode {
+
+  /**
+   * reader for dimension chunk
+   */
+  private DimensionColumnChunkReader dimensionChunksReader;
+
+  /**
+   * reader of measure chunk
+   */
+  private MeasureColumnChunkReader measureColumnChunkReader;
+
+  /**
+   * Create a leaf node
+   *
+   * @param builderInfos builder infos which have required metadata to create a leaf node
+   * @param leafIndex    leaf node index
+   * @param nodeNumber   node number of the node
+   *                     this will be used during query execution when we can
+   *                     give some leaf node of a btree to one executor some to other
+   */
+  public BlockletBTreeLeafNode(BTreeBuilderInfo builderInfos, int leafIndex, long nodeNumber) {
+    // get the leaf node min max index
+    BlockletMinMaxIndex minMaxIndex =
+        builderInfos.getFooterList().get(0).getBlockletList().get(leafIndex)
+            .getBlockletIndex().getMinMaxIndex();
+    // max key of the columns
+    maxKeyOfColumns = minMaxIndex.getMaxValues();
+    // min keys of the columns
+    minKeyOfColumns = minMaxIndex.getMinValues();
+    // number of keys present in the leaf
+    numberOfKeys = builderInfos.getFooterList().get(0).getBlockletList().get(leafIndex)
+        .getNumberOfRows();
+    // create an instance of the dimension chunk reader
+    dimensionChunksReader = new CompressedDimensionChunkFileBasedReader(
+        builderInfos.getFooterList().get(0).getBlockletList().get(leafIndex)
+            .getDimensionColumnChunk(), builderInfos.getDimensionColumnValueSize(),
+        builderInfos.getFooterList().get(0).getTableBlockInfo().getFilePath());
+    // get the value compression model which was used to compress the measure values
+    ValueCompressionModel valueCompressionModel = CarbonUtil.getValueCompressionModel(
+        builderInfos.getFooterList().get(0).getBlockletList().get(leafIndex)
+            .getMeasureColumnChunk());
+    // create an instance of the measure column chunk reader
+    measureColumnChunkReader = new CompressedMeasureChunkFileBasedReader(
+        builderInfos.getFooterList().get(0).getBlockletList().get(leafIndex)
+            .getMeasureColumnChunk(), valueCompressionModel,
+            builderInfos.getFooterList().get(0).getTableBlockInfo().getFilePath());
+    this.nodeNumber = nodeNumber;
+  }
+
+  /**
+   * Below method will be used to get the dimension chunks
+   *
+   * @param fileReader   file reader to read the chunks from file
+   * @param blockIndexes indexes of the blocks need to be read
+   * @return dimension data chunks
+   */
+  @Override public DimensionColumnDataChunk[] getDimensionChunks(FileHolder fileReader,
+      int[] blockIndexes) {
+    return dimensionChunksReader.readDimensionChunks(fileReader, blockIndexes);
+  }
+
+  /**
+   * Below method will be used to get the dimension chunk
+   *
+   * @param fileReader file reader to read the chunk from file
+   * @param blockIndex block index to be read
+   * @return dimension data chunk
+   */
+  @Override public DimensionColumnDataChunk getDimensionChunk(FileHolder fileReader,
+      int blockIndex) {
+    return dimensionChunksReader.readDimensionChunk(fileReader, blockIndex);
+  }
+
+  /**
+   * Below method will be used to get the measure chunks
+   *
+   * @param fileReader   file reader to read the chunk from file
+   * @param blockIndexes block indexes to be read from file
+   * @return measure column data chunk
+   */
+  @Override public MeasureColumnDataChunk[] getMeasureChunks(FileHolder fileReader,
+      int[] blockIndexes) {
+    return measureColumnChunkReader.readMeasureChunks(fileReader, blockIndexes);
+  }
+
+  /**
+   * Below method will be used to read the measure chunk
+   *
+   * @param fileReader file reader to read the file chunk
+   * @param blockIndex block index to be read from file
+   * @return measure data chunk
+   */
+  @Override public MeasureColumnDataChunk getMeasureChunk(FileHolder fileReader, int blockIndex) {
+    return measureColumnChunkReader.readMeasureChunk(fileReader, blockIndex);
+  }
+}
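
A sketch of how a scanner consumes a blocklet leaf once it is located: the leaf
delegates to its chunk readers, and the caller supplies the FileHolder plus the
block indexes it needs (variable names and index values here are illustrative):

    // leaf: a BlockletBTreeLeafNode located during the tree descent
    // fileReader: a FileHolder obtained for this query
    int[] dimensionBlockIndexes = new int[] { 0, 1 };
    DimensionColumnDataChunk[] dimChunks =
        leaf.getDimensionChunks(fileReader, dimensionBlockIndexes);
    MeasureColumnDataChunk measureChunk = leaf.getMeasureChunk(fileReader, 0);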

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/CarbonMetadata.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/CarbonMetadata.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/CarbonMetadata.java
new file mode 100644
index 0000000..ce5e457
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/CarbonMetadata.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.carbondata.core.carbon.metadata.schema.table.CarbonTable;
+import org.apache.carbondata.core.carbon.metadata.schema.table.TableInfo;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+
+/**
+ * Class which persists the information about the tables present in the carbon schemas
+ */
+public final class CarbonMetadata {
+
+  /**
+   * meta data instance
+   */
+  private static final CarbonMetadata CARBONMETADATAINSTANCE = new CarbonMetadata();
+
+  /**
+   * holds the list of tableInfo currently present
+   */
+  private Map<String, CarbonTable> tableInfoMap;
+
+  private CarbonMetadata() {
+    // creating a concurrent map as it will be updated by multiple thread
+    tableInfoMap = new ConcurrentHashMap<String, CarbonTable>();
+  }
+
+  public static CarbonMetadata getInstance() {
+    return CARBONMETADATAINSTANCE;
+  }
+
+  /**
+   * removes the table information
+   *
+   * @param tableUniqueName unique table name
+   */
+  public void removeTable(String tableUniqueName) {
+    tableInfoMap.remove(convertToLowerCase(tableUniqueName));
+  }
+
+  /**
+   * Below method will be used to set the carbon table.
+   * This method is used on the executor side, as the driver always has the
+   * updated table; during query execution and data loading the driver just
+   * needs to add the table
+   *
+   * @param carbonTable carbon table to add
+   */
+  public void addCarbonTable(CarbonTable carbonTable) {
+    tableInfoMap.put(convertToLowerCase(carbonTable.getTableUniqueName()), carbonTable);
+  }
+
+  /**
+   * method to load the table metadata
+   *
+   * @param tableInfo table info to load
+   */
+  public void loadTableMetadata(TableInfo tableInfo) {
+    CarbonTable carbonTable = tableInfoMap.get(convertToLowerCase(tableInfo.getTableUniqueName()));
+    if (null == carbonTable || carbonTable.getTableLastUpdatedTime() < tableInfo
+        .getLastUpdatedTime()) {
+      carbonTable = new CarbonTable();
+      carbonTable.loadCarbonTable(tableInfo);
+      tableInfoMap.put(convertToLowerCase(tableInfo.getTableUniqueName()), carbonTable);
+    }
+  }
+
+  /**
+   * Below method is used to get the loaded carbon table
+   *
+   * @param tableUniqueName unique table name
+   * @return the loaded carbon table, or null if it is not present
+   */
+  public CarbonTable getCarbonTable(String tableUniqueName) {
+    return tableInfoMap.get(convertToLowerCase(tableUniqueName));
+  }
+
+  /**
+   * @return the number of tables present in the schema
+   */
+  public int getNumberOfTables() {
+    return tableInfoMap.size();
+  }
+
+  /**
+   * returns the given string in lowercase
+   *
+   * @param table table name
+   * @return lowercase table name
+   */
+  public String convertToLowerCase(String table) {
+    return table.toLowerCase();
+  }
+
+  /**
+   * method will return dimension instance based on the column identifier
+   * and table instance passed to it.
+   *
+   * @param carbonTable      carbon table instance
+   * @param columnIdentifier column unique identifier
+   * @return CarbonDimension instance
+   */
+  public CarbonDimension getCarbonDimensionBasedOnColIdentifier(CarbonTable carbonTable,
+      String columnIdentifier) {
+    List<CarbonDimension> listOfCarbonDims =
+        carbonTable.getDimensionByTableName(carbonTable.getFactTableName());
+    for (CarbonDimension dimension : listOfCarbonDims) {
+      if (dimension.getColumnId().equals(columnIdentifier)) {
+        return dimension;
+      }
+      if (dimension.numberOfChild() > 0) {
+        CarbonDimension childDim =
+            getCarbonChildDimsBasedOnColIdentifier(columnIdentifier, dimension);
+        if (null != childDim) {
+          return childDim;
+        }
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Below method will be used to get the dimension based on column identifier
+   * for complex dimension children
+   *
+   * @param columnIdentifier column identifier
+   * @param dimension        parent dimension
+   * @return children dimension
+   */
+  private CarbonDimension getCarbonChildDimsBasedOnColIdentifier(String columnIdentifier,
+      CarbonDimension dimension) {
+    for (int i = 0; i < dimension.numberOfChild(); i++) {
+      if (dimension.getListOfChildDimensions().get(i).getColumnId().equals(columnIdentifier)) {
+        return dimension.getListOfChildDimensions().get(i);
+      } else if (dimension.getListOfChildDimensions().get(i).numberOfChild() > 0) {
+        CarbonDimension childDim = getCarbonChildDimsBasedOnColIdentifier(columnIdentifier,
+            dimension.getListOfChildDimensions().get(i));
+        if (null != childDim) {
+          return childDim;
+        }
+      }
+    }
+    return null;
+  }
+}
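
CarbonMetadata is a process-wide singleton over a ConcurrentHashMap, so typical use
is register-then-lookup by the unique table name (lower-cased internally). A brief
sketch, assuming a TableInfo instance called tableInfo is obtained elsewhere:

    CarbonMetadata metadata = CarbonMetadata.getInstance();
    metadata.loadTableMetadata(tableInfo);  // adds or refreshes by last-updated time
    CarbonTable table = metadata.getCarbonTable(tableInfo.getTableUniqueName());
    // ... use the table ...
    metadata.removeTable(tableInfo.getTableUniqueName());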

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/BlockletInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/BlockletInfo.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/BlockletInfo.java
new file mode 100644
index 0000000..b2c72aa
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/BlockletInfo.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.carbon.metadata.blocklet;
+
+import java.io.Serializable;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
+import org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletIndex;
+
+/**
+ * class to store the information about the blocklet
+ */
+public class BlockletInfo implements Serializable {
+
+  /**
+   * serialization id
+   */
+  private static final long serialVersionUID = 1873135459695635381L;
+
+  /**
+   * Number of rows in this blocklet
+   */
+  private int numberOfRows;
+
+  /**
+   * Information about dimension chunk of all dimensions in this blocklet
+   */
+  private List<DataChunk> dimensionColumnChunk;
+
+  /**
+   * Information about measure chunk of all measures in this blocklet
+   */
+  private List<DataChunk> measureColumnChunk;
+
+  /**
+   * to store the index like min max and start and end key of each column of the blocklet
+   */
+  private BlockletIndex blockletIndex;
+
+  /**
+   * @return the numberOfRows
+   */
+  public int getNumberOfRows() {
+    return numberOfRows;
+  }
+
+  /**
+   * @param numberOfRows the numberOfRows to set
+   */
+  public void setNumberOfRows(int numberOfRows) {
+    this.numberOfRows = numberOfRows;
+  }
+
+  /**
+   * @return the dimensionColumnChunk
+   */
+  public List<DataChunk> getDimensionColumnChunk() {
+    return dimensionColumnChunk;
+  }
+
+  /**
+   * @param dimensionColumnChunk the dimensionColumnChunk to set
+   */
+  public void setDimensionColumnChunk(List<DataChunk> dimensionColumnChunk) {
+    this.dimensionColumnChunk = dimensionColumnChunk;
+  }
+
+  /**
+   * @return the measureColumnChunk
+   */
+  public List<DataChunk> getMeasureColumnChunk() {
+    return measureColumnChunk;
+  }
+
+  /**
+   * @param measureColumnChunk the measureColumnChunk to set
+   */
+  public void setMeasureColumnChunk(List<DataChunk> measureColumnChunk) {
+    this.measureColumnChunk = measureColumnChunk;
+  }
+
+  /**
+   * @return the blockletIndex
+   */
+  public BlockletIndex getBlockletIndex() {
+    return blockletIndex;
+  }
+
+  /**
+   * @param blockletIndex the blockletIndex to set
+   */
+  public void setBlockletIndex(BlockletIndex blockletIndex) {
+    this.blockletIndex = blockletIndex;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/DataFileFooter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/DataFileFooter.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/DataFileFooter.java
new file mode 100644
index 0000000..55587da
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/DataFileFooter.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.blocklet;
+
+import java.io.Serializable;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.block.TableBlockInfo;
+import org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletIndex;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
+
+/**
+ * Information of one data file
+ */
+public class DataFileFooter implements Serializable {
+
+  /**
+   * serialization id
+   */
+  private static final long serialVersionUID = -7284319972734500751L;
+
+  /**
+   * version used for data compatibility
+   */
+  private int versionId;
+
+  /**
+   * total number of rows in this file
+   */
+  private long numberOfRows;
+
+  /**
+   * Segment info (will be the same/repeated for all blocks in this segment)
+   */
+  private SegmentInfo segmentInfo;
+
+  /**
+   * Information about all blocklets (leaf nodes) in this file
+   */
+  private List<BlockletInfo> blockletList;
+
+  /**
+   * blocklet index of all blocklets in this file
+   */
+  private BlockletIndex blockletIndex;
+
+  /**
+   * Description of columns in this file
+   */
+  private List<ColumnSchema> columnInTable;
+
+  /**
+   * to store the block info detail like file name block index and locations
+   */
+  private TableBlockInfo tableBlockInfo;
+
+  /**
+   * @return the versionId
+   */
+  public int getVersionId() {
+    return versionId;
+  }
+
+  /**
+   * @param versionId the versionId to set
+   */
+  public void setVersionId(int versionId) {
+    this.versionId = versionId;
+  }
+
+  /**
+   * @return the numberOfRows
+   */
+  public long getNumberOfRows() {
+    return numberOfRows;
+  }
+
+  /**
+   * @param numberOfRows the numberOfRows to set
+   */
+  public void setNumberOfRows(long numberOfRows) {
+    this.numberOfRows = numberOfRows;
+  }
+
+  /**
+   * @return the segmentInfo
+   */
+  public SegmentInfo getSegmentInfo() {
+    return segmentInfo;
+  }
+
+  /**
+   * @param segmentInfo the segmentInfo to set
+   */
+  public void setSegmentInfo(SegmentInfo segmentInfo) {
+    this.segmentInfo = segmentInfo;
+  }
+
+  /**
+   * @return the List of Blocklet
+   */
+  public List<BlockletInfo> getBlockletList() {
+    return blockletList;
+  }
+
+  /**
+   * @param blockletList the blockletList to set
+   */
+  public void setBlockletList(List<BlockletInfo> blockletList) {
+    this.blockletList = blockletList;
+  }
+
+  /**
+   * @return the blockletIndex
+   */
+  public BlockletIndex getBlockletIndex() {
+    return blockletIndex;
+  }
+
+  /**
+   * @param blockletIndex the blockletIndex to set
+   */
+  public void setBlockletIndex(BlockletIndex blockletIndex) {
+    this.blockletIndex = blockletIndex;
+  }
+
+  /**
+   * @return the columnInTable
+   */
+  public List<ColumnSchema> getColumnInTable() {
+    return columnInTable;
+  }
+
+  /**
+   * @param columnInTable the columnInTable to set
+   */
+  public void setColumnInTable(List<ColumnSchema> columnInTable) {
+    this.columnInTable = columnInTable;
+  }
+
+  /**
+   * @return the tableBlockInfo
+   */
+  public TableBlockInfo getTableBlockInfo() {
+    return tableBlockInfo;
+  }
+
+  /**
+   * @param tableBlockInfo the tableBlockInfo to set
+   */
+  public void setTableBlockInfo(TableBlockInfo tableBlockInfo) {
+    this.tableBlockInfo = tableBlockInfo;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/SegmentInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/SegmentInfo.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/SegmentInfo.java
new file mode 100644
index 0000000..c0d6b55
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/SegmentInfo.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.blocklet;
+
+import java.io.Serializable;
+
+/**
+ * Class holds the information about the segment information
+ */
+public class SegmentInfo implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = -1749874611112709431L;
+
+  /**
+   * number of columns in the segment
+   */
+  private int numberOfColumns;
+
+  /**
+   * cardinality of each column;
+   * for a column not participating in the multidimensional key the cardinality will be -1
+   */
+  private int[] columnCardinality;
+
+  /**
+   * @return the numberOfColumns
+   */
+  public int getNumberOfColumns() {
+    return numberOfColumns;
+  }
+
+  /**
+   * @param numberOfColumns the numberOfColumns to set
+   */
+  public void setNumberOfColumns(int numberOfColumns) {
+    this.numberOfColumns = numberOfColumns;
+  }
+
+  /**
+   * @return the columnCardinality
+   */
+  public int[] getColumnCardinality() {
+    return columnCardinality;
+  }
+
+  /**
+   * @param columnCardinality the columnCardinality to set
+   */
+  public void setColumnCardinality(int[] columnCardinality) {
+    this.columnCardinality = columnCardinality;
+  }
+
+}
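
To illustrate the -1 convention above: a segment with three columns where the third
does not participate in the multidimensional key would carry a cardinality array
like this (values are illustrative):

    SegmentInfo segmentInfo = new SegmentInfo();
    segmentInfo.setNumberOfColumns(3);
    segmentInfo.setColumnCardinality(new int[] { 1000, 50, -1 }); // -1: not in the key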

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/compressor/ChunkCompressorMeta.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/compressor/ChunkCompressorMeta.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/compressor/ChunkCompressorMeta.java
new file mode 100644
index 0000000..4f1ae37
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/compressor/ChunkCompressorMeta.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.carbon.metadata.blocklet.compressor;
+
+import java.io.Serializable;
+
+/**
+ * Represents the compression information of the data of one dimension or
+ * one dimension group in one blocklet
+ */
+public class ChunkCompressorMeta implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = -6697087170420991140L;
+
+  /**
+   * data chunk compressor
+   */
+  private CompressionCodec compressor;
+
+  /**
+   * total byte size of all uncompressed pages in this column chunk (including the headers)
+   */
+  private long uncompressedSize;
+
+  /**
+   * total byte size of all compressed pages in this column chunk (including the headers)
+   */
+  private long compressedSize;
+
+  /**
+   * @return the compressor
+   */
+  public CompressionCodec getCompressorCodec() {
+    return compressor;
+  }
+
+  /**
+   * @param compressor the compressor to set
+   */
+  public void setCompressor(CompressionCodec compressor) {
+    this.compressor = compressor;
+  }
+
+  /**
+   * @return the uncompressedSize
+   */
+  public long getUncompressedSize() {
+    return uncompressedSize;
+  }
+
+  /**
+   * @param uncompressedSize the uncompressedSize to set
+   */
+  public void setUncompressedSize(long uncompressedSize) {
+    this.uncompressedSize = uncompressedSize;
+  }
+
+  /**
+   * @return the compressedSize
+   */
+  public long getCompressedSize() {
+    return compressedSize;
+  }
+
+  /**
+   * @param compressedSize the compressedSize to set
+   */
+  public void setCompressedSize(long compressedSize) {
+    this.compressedSize = compressedSize;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/compressor/CompressionCodec.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/compressor/CompressionCodec.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/compressor/CompressionCodec.java
new file mode 100644
index 0000000..6f302ec
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/compressor/CompressionCodec.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.carbon.metadata.blocklet.compressor;
+
+/**
+ * Compressions supported by Carbon Data.
+ */
+public enum CompressionCodec {
+
+  /**
+   * snappy compression
+   */
+  SNAPPY,
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/datachunk/DataChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/datachunk/DataChunk.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/datachunk/DataChunk.java
new file mode 100644
index 0000000..584e51f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/datachunk/DataChunk.java
@@ -0,0 +1,327 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.blocklet.datachunk;
+
+import java.io.Serializable;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.metadata.blocklet.compressor.ChunkCompressorMeta;
+import org.apache.carbondata.core.carbon.metadata.blocklet.sort.SortState;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.metadata.ValueEncoderMeta;
+
+/**
+ * Class holds the information about the data chunk metadata
+ */
+public class DataChunk implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * the compression meta data of a chunk
+   */
+  private ChunkCompressorMeta chunkCompressionMeta;
+
+  /**
+   * whether this chunk is a row chunk or column chunk
+   */
+  private boolean isRowMajor;
+
+  /**
+   * the column IDs in this chunk: at least
+   * one column ID for columnar format, many column IDs for
+   * row major format
+   */
+  private List<Integer> columnUniqueIdList;
+
+  /**
+   * Offset of data page
+   */
+  private long dataPageOffset;
+
+  /**
+   * length of data page
+   */
+  private int dataPageLength;
+
+  /**
+   * information about presence of values in each row of this column chunk
+   */
+  private transient PresenceMeta nullValueIndexForColumn;
+
+  /**
+   * offset of row id page, only if encoded using inverted index
+   */
+  private long rowIdPageOffset;
+
+  /**
+   * length of row id page, only if encoded using inverted index
+   */
+  private int rowIdPageLength;
+
+  /**
+   * offset of rle page, only if RLE coded.
+   */
+  private long rlePageOffset;
+
+  /**
+   * length of rle page, only if RLE coded.
+   */
+  private int rlePageLength;
+
+  /**
+   * whether RLE is applied in the data chunk
+   */
+  private boolean isRleApplied;
+
+  /**
+   * whether dictionary encoding is applied to the column; only relevant for a dimension column
+   */
+  private boolean isNoDictonaryColumn;
+
+  /**
+   * sorting type selected for the chunk
+   */
+  private SortState sortState;
+
+  /**
+   * The list of encoders overridden at node level
+   */
+  private List<Encoding> encodingList;
+
+  /**
+   * value encoder meta which holds the information
+   * about max, min, decimal length and type
+   */
+  private List<ValueEncoderMeta> valueEncoderMetaList;
+
+  /**
+   * @return the chunkCompressionMeta
+   */
+  public ChunkCompressorMeta getChunkCompressionMeta() {
+    return chunkCompressionMeta;
+  }
+
+  /**
+   * @param chunkCompressionMeta the chunkCompressionMeta to set
+   */
+  public void setChunkCompressionMeta(ChunkCompressorMeta chunkCompressionMeta) {
+    this.chunkCompressionMeta = chunkCompressionMeta;
+  }
+
+  /**
+   * @return the isRowMajor
+   */
+  public boolean isRowMajor() {
+    return isRowMajor;
+  }
+
+  /**
+   * @param isRowMajor the isRowMajor to set
+   */
+  public void setRowMajor(boolean isRowMajor) {
+    this.isRowMajor = isRowMajor;
+  }
+
+  /**
+   * @return the columnUniqueIdList
+   */
+  public List<Integer> getColumnUniqueIdList() {
+    return columnUniqueIdList;
+  }
+
+  /**
+   * @param columnUniqueIdList the columnUniqueIdList to set
+   */
+  public void setColumnUniqueIdList(List<Integer> columnUniqueIdList) {
+    this.columnUniqueIdList = columnUniqueIdList;
+  }
+
+  /**
+   * @return the dataPageOffset
+   */
+  public long getDataPageOffset() {
+    return dataPageOffset;
+  }
+
+  /**
+   * @param dataPageOffset the dataPageOffset to set
+   */
+  public void setDataPageOffset(long dataPageOffset) {
+    this.dataPageOffset = dataPageOffset;
+  }
+
+  /**
+   * @return the dataPageLength
+   */
+  public int getDataPageLength() {
+    return dataPageLength;
+  }
+
+  /**
+   * @param dataPageLength the dataPageLength to set
+   */
+  public void setDataPageLength(int dataPageLength) {
+    this.dataPageLength = dataPageLength;
+  }
+
+  /**
+   * @return the nullValueIndexForColumn
+   */
+  public PresenceMeta getNullValueIndexForColumn() {
+    return nullValueIndexForColumn;
+  }
+
+  /**
+   * @param nullValueIndexForColumn the nullValueIndexForColumn to set
+   */
+  public void setNullValueIndexForColumn(PresenceMeta nullValueIndexForColumn) {
+    this.nullValueIndexForColumn = nullValueIndexForColumn;
+  }
+
+  /**
+   * @return the rowIdPageOffset
+   */
+  public long getRowIdPageOffset() {
+    return rowIdPageOffset;
+  }
+
+  /**
+   * @param rowIdPageOffset the rowIdPageOffset to set
+   */
+  public void setRowIdPageOffset(long rowIdPageOffset) {
+    this.rowIdPageOffset = rowIdPageOffset;
+  }
+
+  /**
+   * @return the rowIdPageLength
+   */
+  public int getRowIdPageLength() {
+    return rowIdPageLength;
+  }
+
+  /**
+   * @param rowIdPageLength the rowIdPageLength to set
+   */
+  public void setRowIdPageLength(int rowIdPageLength) {
+    this.rowIdPageLength = rowIdPageLength;
+  }
+
+  /**
+   * @return the rlePageOffset
+   */
+  public long getRlePageOffset() {
+    return rlePageOffset;
+  }
+
+  /**
+   * @param rlePageOffset the rlePageOffset to set
+   */
+  public void setRlePageOffset(long rlePageOffset) {
+    this.rlePageOffset = rlePageOffset;
+  }
+
+  /**
+   * @return the rlePageLength
+   */
+  public int getRlePageLength() {
+    return rlePageLength;
+  }
+
+  /**
+   * @param rlePageLength the rlePageLength to set
+   */
+  public void setRlePageLength(int rlePageLength) {
+    this.rlePageLength = rlePageLength;
+  }
+
+  /**
+   * @return the isRleApplied
+   */
+  public boolean isRleApplied() {
+    return isRleApplied;
+  }
+
+  /**
+   * @param isRleApplied the isRleApplied to set
+   */
+  public void setRleApplied(boolean isRleApplied) {
+    this.isRleApplied = isRleApplied;
+  }
+
+  /**
+   * @return the isNoDictonaryColumn
+   */
+  public boolean isNoDictonaryColumn() {
+    return isNoDictonaryColumn;
+  }
+
+  /**
+   * @param isNoDictonaryColumn the isNoDictonaryColumn to set
+   */
+  public void setNoDictonaryColumn(boolean isNoDictonaryColumn) {
+    this.isNoDictonaryColumn = isNoDictonaryColumn;
+  }
+
+  /**
+   * @return the sortState
+   */
+  public SortState getSortState() {
+    return sortState;
+  }
+
+  /**
+   * @param sortState the sortState to set
+   */
+  public void setSortState(SortState sortState) {
+    this.sortState = sortState;
+  }
+
+  /**
+   * @return the encodingList
+   */
+  public List<Encoding> getEncodingList() {
+    return encodingList;
+  }
+
+  /**
+   * @param encodingList the encodingList to set
+   */
+  public void setEncoderList(List<Encoding> encodingList) {
+    this.encodingList = encodingList;
+  }
+
+  /**
+   * @return the valueEncoderMetaList
+   */
+  public List<ValueEncoderMeta> getValueEncoderMeta() {
+    return valueEncoderMetaList;
+  }
+
+  /**
+   * @param valueEncoderMetaList the valueEncoderMetaList to set
+   */
+  public void setValueEncoderMeta(List<ValueEncoderMeta> valueEncoderMetaList) {
+    this.valueEncoderMetaList = valueEncoderMetaList;
+  }
+
+}
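
For illustration, a minimal sketch of populating the DataChunk metadata above for one
dictionary-encoded columnar chunk. All offsets, lengths and column ids are made-up
numbers, and the Encoding constants used (DICTIONARY, RLE, INVERTED_INDEX) are assumed
to exist on the Encoding enum referenced above.

    import java.util.Arrays;

    import org.apache.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
    import org.apache.carbondata.core.carbon.metadata.blocklet.sort.SortState;
    import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;

    public class DataChunkExample {
      public static DataChunk buildExampleChunk() {
        DataChunk chunk = new DataChunk();
        chunk.setRowMajor(false);                       // columnar, not row-major
        chunk.setColumnUniqueIdList(Arrays.asList(3));  // single column in this chunk
        chunk.setDataPageOffset(0L);                    // data page starts the chunk
        chunk.setDataPageLength(4096);
        chunk.setRowIdPageOffset(4096L);                // inverted-index page follows data page
        chunk.setRowIdPageLength(512);
        chunk.setRlePageOffset(4608L);                  // rle page follows row id page
        chunk.setRlePageLength(128);
        chunk.setRleApplied(true);
        chunk.setNoDictonaryColumn(false);              // dictionary-encoded dimension
        chunk.setSortState(SortState.SORT_NATIVE);
        chunk.setEncoderList(
            Arrays.asList(Encoding.DICTIONARY, Encoding.RLE, Encoding.INVERTED_INDEX));
        return chunk;
      }
    }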

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/datachunk/PresenceMeta.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/datachunk/PresenceMeta.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/datachunk/PresenceMeta.java
new file mode 100644
index 0000000..f73d3d0
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/datachunk/PresenceMeta.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.core.carbon.metadata.blocklet.datachunk;
+
+import java.util.BitSet;
+
+/**
+ * information about presence of values in each row of the column chunk
+ */
+public class PresenceMeta {
+
+  /**
+   * if true, ones in the bit stream represent presence; otherwise they represent absence
+   */
+  private boolean representNullValues;
+
+  /**
+   * Compressed bit stream representing the presence of null values
+   */
+  private BitSet bitSet;
+
+  /**
+   * @return the representNullValues
+   */
+  public boolean isRepresentNullValues() {
+    return representNullValues;
+  }
+
+  /**
+   * @param representNullValues the representNullValues to set
+   */
+  public void setRepresentNullValues(boolean representNullValues) {
+    this.representNullValues = representNullValues;
+  }
+
+  /**
+   * @return the bitSet
+   */
+  public BitSet getBitSet() {
+    return bitSet;
+  }
+
+  /**
+   * @param bitSet the bitSet to set
+   */
+  public void setBitSet(BitSet bitSet) {
+    this.bitSet = bitSet;
+  }
+}
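
A small usage sketch, under the assumed convention (consistent with the null-value
checks in the scan code later in this change) that a set bit marks a null row when
representNullValues is true:

    import java.util.BitSet;

    import org.apache.carbondata.core.carbon.metadata.blocklet.datachunk.PresenceMeta;

    public class PresenceMetaExample {
      // Builds presence metadata for an 8-row page where rows 2 and 5 are null.
      public static PresenceMeta nullsAtRows2And5() {
        BitSet nullRows = new BitSet(8);
        nullRows.set(2);
        nullRows.set(5);
        PresenceMeta meta = new PresenceMeta();
        meta.setRepresentNullValues(true);  // ones denote null positions (assumption)
        meta.setBitSet(nullRows);
        return meta;
      }
    }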

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/index/BlockletBTreeIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/index/BlockletBTreeIndex.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/index/BlockletBTreeIndex.java
new file mode 100644
index 0000000..680b4eb
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/index/BlockletBTreeIndex.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.blocklet.index;
+
+import java.io.Serializable;
+
+/**
+ * Class holds the information about the start and end key of one blocklet
+ */
+public class BlockletBTreeIndex implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = 6116185464700853045L;
+
+  /**
+   * Bit-packed start key of one blocklet
+   */
+  private byte[] startKey;
+
+  /**
+   * Bit-packed end key of one blocklet
+   */
+  private byte[] endKey;
+
+  public BlockletBTreeIndex() {
+  }
+
+  public BlockletBTreeIndex(byte[] startKey, byte[] endKey) {
+    this.startKey = startKey;
+    this.endKey = endKey;
+  }
+
+  /**
+   * @return the startKey
+   */
+  public byte[] getStartKey() {
+    return startKey;
+  }
+
+  /**
+   * @param startKey the startKey to set
+   */
+  public void setStartKey(byte[] startKey) {
+    this.startKey = startKey;
+  }
+
+  /**
+   * @return the endKey
+   */
+  public byte[] getEndKey() {
+    return endKey;
+  }
+
+  /**
+   * @param endKey the endKey to set
+   */
+  public void setEndKey(byte[] endKey) {
+    this.endKey = endKey;
+  }
+}
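
The start and end keys make blocklet-level range checks possible. A hedged sketch of
that check, with a hand-rolled unsigned lexicographic comparer standing in for carbon's
own byte comparer:

    import org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletBTreeIndex;

    public class BTreeRangeCheckExample {
      // Unsigned lexicographic byte[] comparison (simplified stand-in).
      private static int compareUnsigned(byte[] a, byte[] b) {
        int len = Math.min(a.length, b.length);
        for (int i = 0; i < len; i++) {
          int cmp = (a[i] & 0xFF) - (b[i] & 0xFF);
          if (cmp != 0) {
            return cmp;
          }
        }
        return a.length - b.length;
      }

      // True when searchKey lies inside the blocklet's [startKey, endKey] range.
      public static boolean mayContain(BlockletBTreeIndex index, byte[] searchKey) {
        return compareUnsigned(searchKey, index.getStartKey()) >= 0
            && compareUnsigned(searchKey, index.getEndKey()) <= 0;
      }
    }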

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/index/BlockletIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/index/BlockletIndex.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/index/BlockletIndex.java
new file mode 100644
index 0000000..b78d2b9
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/index/BlockletIndex.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.blocklet.index;
+
+import java.io.Serializable;
+
+/**
+ * Persist Index of all blocklets in one file
+ */
+public class BlockletIndex implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * btree index (start and end key) of the blocklet
+   */
+  private BlockletBTreeIndex btreeIndex;
+
+  /**
+   * min and max values of all columns of the blocklet
+   */
+  private BlockletMinMaxIndex minMaxIndex;
+
+  public BlockletIndex() {
+  }
+
+  public BlockletIndex(BlockletBTreeIndex btree, BlockletMinMaxIndex minmax) {
+    this.btreeIndex = btree;
+    this.minMaxIndex = minmax;
+  }
+
+  /**
+   * @return the btreeIndex
+   */
+  public BlockletBTreeIndex getBtreeIndex() {
+    return btreeIndex;
+  }
+
+  /**
+   * @param btreeIndex the btreeIndex to set
+   */
+  public void setBtreeIndex(BlockletBTreeIndex btreeIndex) {
+    this.btreeIndex = btreeIndex;
+  }
+
+  /**
+   * @return the minMaxIndex
+   */
+  public BlockletMinMaxIndex getMinMaxIndex() {
+    return minMaxIndex;
+  }
+
+  /**
+   * @param minMaxIndex the minMaxIndex to set
+   */
+  public void setMinMaxIndex(BlockletMinMaxIndex minMaxIndex) {
+    this.minMaxIndex = minMaxIndex;
+  }
+
+}
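
Putting the two structures together for one blocklet; the byte arrays here are
placeholders rather than real packed keys:

    import org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletBTreeIndex;
    import org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletIndex;
    import org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletMinMaxIndex;

    public class BlockletIndexExample {
      public static BlockletIndex build(byte[] startKey, byte[] endKey,
          byte[][] minValues, byte[][] maxValues) {
        BlockletMinMaxIndex minMax = new BlockletMinMaxIndex();
        minMax.setMinValues(minValues);
        minMax.setMaxValues(maxValues);
        return new BlockletIndex(new BlockletBTreeIndex(startKey, endKey), minMax);
      }
    }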

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/index/BlockletMinMaxIndex.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/index/BlockletMinMaxIndex.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/index/BlockletMinMaxIndex.java
new file mode 100644
index 0000000..928884f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/index/BlockletMinMaxIndex.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.blocklet.index;
+
+import java.io.Serializable;
+import java.nio.ByteBuffer;
+import java.util.List;
+
+/**
+ * Holds the min and max values of all the columns in a blocklet
+ */
+public class BlockletMinMaxIndex implements Serializable {
+
+  /**
+   * serialization version
+   */
+  private static final long serialVersionUID = -4311405145501302895L;
+
+  /**
+   * Min value of all columns of one blocklet Bit-Packed
+   */
+  private byte[][] minValues;
+
+  /**
+   * Max value of all columns of one blocklet Bit-Packed
+   */
+  private byte[][] maxValues;
+
+  public BlockletMinMaxIndex() {
+  }
+
+  public BlockletMinMaxIndex(List<ByteBuffer> minValues, List<ByteBuffer> maxValues) {
+    this.minValues = new byte[minValues.size()][];
+    this.maxValues = new byte[maxValues.size()][];
+    for (int i = 0; i < minValues.size(); i++) {
+      this.minValues[i] = minValues.get(i).array();
+      this.maxValues[i] = maxValues.get(i).array();
+    }
+  }
+
+  /**
+   * @return the minValues
+   */
+  public byte[][] getMinValues() {
+    return minValues;
+  }
+
+  /**
+   * @param minValues the minValues to set
+   */
+  public void setMinValues(byte[][] minValues) {
+    this.minValues = minValues;
+  }
+
+  /**
+   * @return the maxValues
+   */
+  public byte[][] getMaxValues() {
+    return maxValues;
+  }
+
+  /**
+   * @param maxValues the maxValues to set
+   */
+  public void setMaxValues(byte[][] maxValues) {
+    this.maxValues = maxValues;
+  }
+
+}
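
A sketch of the pruning decision this index enables: for an equality filter, a blocklet
needs scanning only when the filter value lies inside the column's [min, max] range,
the same test used by the filter executers later in this change. The unsigned comparer
is again a simplified stand-in for carbon's byte comparer:

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    import org.apache.carbondata.core.carbon.metadata.blocklet.index.BlockletMinMaxIndex;

    public class MinMaxPruneExample {
      private static int compareUnsigned(byte[] a, byte[] b) {
        int len = Math.min(a.length, b.length);
        for (int i = 0; i < len; i++) {
          int cmp = (a[i] & 0xFF) - (b[i] & 0xFF);
          if (cmp != 0) {
            return cmp;
          }
        }
        return a.length - b.length;
      }

      // filter <= max and filter >= min for the given column ordinal.
      public static boolean scanRequired(BlockletMinMaxIndex idx, int colOrdinal,
          byte[] filterVal) {
        return compareUnsigned(filterVal, idx.getMaxValues()[colOrdinal]) <= 0
            && compareUnsigned(filterVal, idx.getMinValues()[colOrdinal]) >= 0;
      }

      public static void main(String[] args) {
        BlockletMinMaxIndex idx = new BlockletMinMaxIndex(
            Arrays.asList(ByteBuffer.wrap(new byte[] { 0x01 })),    // min of column 0
            Arrays.asList(ByteBuffer.wrap(new byte[] { 0x7F })));   // max of column 0
        System.out.println(scanRequired(idx, 0, new byte[] { 0x10 }));         // true
        System.out.println(scanRequired(idx, 0, new byte[] { (byte) 0x90 }));  // false
      }
    }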

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/sort/SortState.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/sort/SortState.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/sort/SortState.java
new file mode 100644
index 0000000..122198c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/blocklet/sort/SortState.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.blocklet.sort;
+
+/**
+ * Enum for sort type information
+ */
+public enum SortState {
+
+  /**
+   * column is not sorted
+   */
+  SORT_NONE,
+
+  /**
+   * data from source was already in sorted order
+   */
+  SORT_NATIVE,
+
+  /**
+   * data from source was not sorted, so data is explicitly sorted
+   */
+  SORT_EXPLICT;
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/core/carbon/metadata/converter/SchemaConverter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/metadata/converter/SchemaConverter.java b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/converter/SchemaConverter.java
new file mode 100644
index 0000000..c00c197
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/carbon/metadata/converter/SchemaConverter.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.core.carbon.metadata.converter;
+
+import org.apache.carbondata.core.carbon.metadata.schema.SchemaEvolution;
+import org.apache.carbondata.core.carbon.metadata.schema.SchemaEvolutionEntry;
+import org.apache.carbondata.core.carbon.metadata.schema.table.TableInfo;
+import org.apache.carbondata.core.carbon.metadata.schema.table.TableSchema;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
+
+/**
+ * Converter interface to be implemented for converting between external and Carbon wrapper schema objects
+ */
+public interface SchemaConverter {
+  /**
+   * @param wrapperSchemaEvolutionEntry
+   * @return
+   */
+  org.apache.carbondata.format.SchemaEvolutionEntry fromWrapperToExternalSchemaEvolutionEntry(
+      SchemaEvolutionEntry wrapperSchemaEvolutionEntry);
+
+  /**
+   * @param wrapperSchemaEvolution
+   * @return
+   */
+  org.apache.carbondata.format.SchemaEvolution fromWrapperToExternalSchemaEvolution(
+      SchemaEvolution wrapperSchemaEvolution);
+
+  /**
+   * @param wrapperColumnSchema
+   * @return
+   */
+  org.apache.carbondata.format.ColumnSchema fromWrapperToExternalColumnSchema(
+      ColumnSchema wrapperColumnSchema);
+
+  /**
+   * @param wrapperTableSchema
+   * @return
+   */
+  org.apache.carbondata.format.TableSchema fromWrapperToExternalTableSchema(
+      TableSchema wrapperTableSchema);
+
+  /**
+   * @param wrapperTableInfo
+   * @param dbName
+   * @param tableName
+   * @return
+   */
+  org.apache.carbondata.format.TableInfo fromWrapperToExternalTableInfo(TableInfo wrapperTableInfo,
+      String dbName, String tableName);
+
+  /**
+   * @param externalSchemaEvolutionEntry
+   * @return
+   */
+  SchemaEvolutionEntry fromExternalToWrapperSchemaEvolutionEntry(
+      org.apache.carbondata.format.SchemaEvolutionEntry externalSchemaEvolutionEntry);
+
+  /**
+   * @param externalSchemaEvolution
+   * @return
+   */
+  SchemaEvolution fromExternalToWrapperSchemaEvolution(
+      org.apache.carbondata.format.SchemaEvolution externalSchemaEvolution);
+
+  /**
+   * @param externalColumnSchema
+   * @return
+   */
+  ColumnSchema fromExternalToWrapperColumnSchema(
+      org.apache.carbondata.format.ColumnSchema externalColumnSchema);
+
+  /**
+   * @param externalTableSchema
+   * @param tableName
+   * @return
+   */
+  TableSchema fromExternalToWrapperTableSchema(
+      org.apache.carbondata.format.TableSchema externalTableSchema, String tableName);
+
+  /**
+   * @param externalTableInfo
+   * @param dbName
+   * @param tableName
+   * @param storePath
+   * @return
+   */
+  TableInfo fromExternalToWrapperTableInfo(org.apache.carbondata.format.TableInfo externalTableInfo,
+      String dbName, String tableName, String storePath);
+}
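
A hedged sketch of how an implementation of this interface is typically driven;
`converter` stands for any concrete implementation (a Thrift-backed one, for example)
and is an assumption of the sketch, not something defined by the interface itself:

    import org.apache.carbondata.core.carbon.metadata.converter.SchemaConverter;
    import org.apache.carbondata.core.carbon.metadata.schema.table.TableInfo;

    public class SchemaConverterUsage {
      public static TableInfo roundTrip(SchemaConverter converter, TableInfo wrapperInfo,
          String dbName, String tableName, String storePath) {
        // wrapper (in-memory) form -> external (file format) form, e.g. for persisting
        org.apache.carbondata.format.TableInfo external =
            converter.fromWrapperToExternalTableInfo(wrapperInfo, dbName, tableName);
        // external form -> wrapper form, e.g. after reading the schema back
        return converter.fromExternalToWrapperTableInfo(external, dbName, tableName, storePath);
      }
    }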


[04/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/IncludeColGroupFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/IncludeColGroupFilterExecuterImpl.java b/core/src/main/java/org/carbondata/scan/filter/executer/IncludeColGroupFilterExecuterImpl.java
deleted file mode 100644
index a6d8b7d..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/IncludeColGroupFilterExecuterImpl.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.keygenerator.KeyGenException;
-import org.carbondata.core.keygenerator.KeyGenerator;
-import org.carbondata.core.util.ByteUtil;
-import org.carbondata.scan.executor.infos.KeyStructureInfo;
-import org.carbondata.scan.executor.util.QueryUtil;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-
-/**
- * It checks if filter is required on given block and if required, it does
- * linear search on block data and set the bitset.
- */
-public class IncludeColGroupFilterExecuterImpl extends IncludeFilterExecuterImpl {
-
-  /**
-   * LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(IncludeColGroupFilterExecuterImpl.class.getName());
-
-  /**
-   * @param dimColResolvedFilterInfo
-   * @param segmentProperties
-   */
-  public IncludeColGroupFilterExecuterImpl(DimColumnResolvedFilterInfo dimColResolvedFilterInfo,
-      SegmentProperties segmentProperties) {
-    super(dimColResolvedFilterInfo, segmentProperties);
-  }
-
-  /**
-   * It fills the BitSet with the row indexes that match the filter key
-   */
-  protected BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-
-    try {
-      KeyStructureInfo keyStructureInfo = getKeyStructureInfo();
-      byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-      for (int i = 0; i < filterValues.length; i++) {
-        byte[] filterVal = filterValues[i];
-        for (int rowId = 0; rowId < numerOfRows; rowId++) {
-          byte[] colData = new byte[keyStructureInfo.getMaskByteRanges().length];
-          dimensionColumnDataChunk.fillChunkData(colData, 0, rowId, keyStructureInfo);
-          if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterVal, colData) == 0) {
-            bitSet.set(rowId);
-          }
-        }
-      }
-
-    } catch (Exception e) {
-      LOGGER.error(e);
-    }
-
-    return bitSet;
-  }
-
-  /**
-   * It is required for extracting column data from columngroup chunk
-   *
-   * @return
-   * @throws KeyGenException
-   */
-  private KeyStructureInfo getKeyStructureInfo() throws KeyGenException {
-    int colGrpId = getColumnGroupId(dimColumnEvaluatorInfo.getColumnIndex());
-    KeyGenerator keyGenerator = segmentProperties.getColumnGroupAndItsKeygenartor().get(colGrpId);
-    List<Integer> mdKeyOrdinal = new ArrayList<Integer>();
-    mdKeyOrdinal.add(getMdkeyOrdinal(dimColumnEvaluatorInfo.getColumnIndex(), colGrpId));
-    int[] maskByteRanges = QueryUtil.getMaskedByteRangeBasedOrdinal(mdKeyOrdinal, keyGenerator);
-    byte[] maxKey = QueryUtil.getMaxKeyBasedOnOrinal(mdKeyOrdinal, keyGenerator);
-    int[] maksedByte = QueryUtil.getMaskedByte(keyGenerator.getKeySizeInBytes(), maskByteRanges);
-    KeyStructureInfo restructureInfos = new KeyStructureInfo();
-    restructureInfos.setKeyGenerator(keyGenerator);
-    restructureInfos.setMaskByteRanges(maskByteRanges);
-    restructureInfos.setMaxKey(maxKey);
-    restructureInfos.setMaskedBytes(maksedByte);
-    return restructureInfos;
-  }
-
-  /**
-   * Check if scan is required on given block based on min and max value
-   */
-  public BitSet isScanRequired(byte[][] blkMaxVal, byte[][] blkMinVal) {
-    BitSet bitSet = new BitSet(1);
-    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-    int columnIndex = dimColumnEvaluatorInfo.getColumnIndex();
-    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping().get(columnIndex);
-    int[] cols = getAllColumns(columnIndex);
-    byte[] maxValue = getMinMaxData(cols, blkMaxVal[blockIndex], columnIndex);
-    byte[] minValue = getMinMaxData(cols, blkMinVal[blockIndex], columnIndex);
-    boolean isScanRequired = false;
-    for (int k = 0; k < filterValues.length; k++) {
-      // filter value should be in range of max and min value i.e
-      // max>filtervalue>min
-      // so filter-max should be negative
-      int maxCompare = ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], maxValue);
-      // and filter-min should be positive
-      int minCompare = ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], minValue);
-
-      // if any filter value is in range then this block needs to be
-      // scanned
-      if (maxCompare <= 0 && minCompare >= 0) {
-        isScanRequired = true;
-        break;
-      }
-    }
-    if (isScanRequired) {
-      bitSet.set(0);
-    }
-    return bitSet;
-  }
-
-  /**
-   * It extracts min and max data for the given column from the stored min/max values
-   *
-   * @param colGrpColumns
-   * @param minMaxData
-   * @param columnIndex
-   * @return
-   */
-  private byte[] getMinMaxData(int[] colGrpColumns, byte[] minMaxData, int columnIndex) {
-    int startIndex = 0;
-    int endIndex = 0;
-    if (null != colGrpColumns) {
-      for (int i = 0; i < colGrpColumns.length; i++) {
-        int colGrpId = getColumnGroupId(colGrpColumns[i]);
-        int mdKeyOrdinal = getMdkeyOrdinal(colGrpColumns[i], colGrpId);
-        int[] byteRange = getKeyGenerator(colGrpId).getKeyByteOffsets(mdKeyOrdinal);
-        int colSize = 0;
-        for (int j = byteRange[0]; j <= byteRange[1]; j++) {
-          colSize++;
-        }
-        if (colGrpColumns[i] == columnIndex) {
-          endIndex = startIndex + colSize;
-          break;
-        }
-        startIndex += colSize;
-      }
-    }
-    byte[] data = new byte[endIndex - startIndex];
-    System.arraycopy(minMaxData, startIndex, data, 0, data.length);
-    return data;
-  }
-
-  /**
-   * It returns column groups which have provided column ordinal
-   *
-   * @param columnIndex
-   * @return column group array
-   */
-  private int[] getAllColumns(int columnIndex) {
-    int[][] colGroups = segmentProperties.getColumnGroups();
-    for (int i = 0; i < colGroups.length; i++) {
-      if (QueryUtil.searchInArray(colGroups[i], columnIndex)) {
-        return colGroups[i];
-      }
-    }
-    return null;
-  }
-
-  private int getMdkeyOrdinal(int ordinal, int colGrpId) {
-    return segmentProperties.getColumnGroupMdKeyOrdinal(colGrpId, ordinal);
-  }
-
-  private int getColumnGroupId(int ordinal) {
-    int[][] columnGroups = segmentProperties.getColumnGroups();
-    int colGrpId = -1;
-    for (int i = 0; i < columnGroups.length; i++) {
-      if (columnGroups[i].length > 1) {
-        colGrpId++;
-        if (QueryUtil.searchInArray(columnGroups[i], ordinal)) {
-          break;
-        }
-      }
-    }
-    return colGrpId;
-  }
-
-  public KeyGenerator getKeyGenerator(int colGrpId) {
-    return segmentProperties.getColumnGroupAndItsKeygenartor().get(colGrpId);
-  }
-}
\ No newline at end of file
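
The byte-slicing idea in getMinMaxData above, reduced to a standalone sketch: a column
group's min/max is stored as one packed byte[], and a member column's portion is copied
out using its byte offsets within the group key. The offsets below are illustrative:

    import java.util.Arrays;

    public class ColGroupSliceExample {
      public static byte[] sliceColumn(byte[] packedMinMax, int startIndex, int endIndex) {
        byte[] data = new byte[endIndex - startIndex];
        System.arraycopy(packedMinMax, startIndex, data, 0, data.length);
        return data;
      }

      public static void main(String[] args) {
        byte[] groupMax = { 10, 20, 30, 40 };  // two columns, two bytes each
        // second column occupies bytes [2, 4) of the packed max value
        System.out.println(Arrays.toString(sliceColumn(groupMax, 2, 4)));  // [30, 40]
      }
    }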

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/IncludeFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/IncludeFilterExecuterImpl.java b/core/src/main/java/org/carbondata/scan/filter/executer/IncludeFilterExecuterImpl.java
deleted file mode 100644
index 14a4c3b..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/IncludeFilterExecuterImpl.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import java.util.BitSet;
-import java.util.List;
-
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.impl.VariableLengthDimensionDataChunk;
-import org.carbondata.core.util.ByteUtil;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.scan.filter.FilterUtil;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-public class IncludeFilterExecuterImpl implements FilterExecuter {
-
-  protected DimColumnResolvedFilterInfo dimColumnEvaluatorInfo;
-  protected DimColumnExecuterFilterInfo dimColumnExecuterInfo;
-  protected SegmentProperties segmentProperties;
-
-  public IncludeFilterExecuterImpl(DimColumnResolvedFilterInfo dimColumnEvaluatorInfo,
-      SegmentProperties segmentProperties) {
-    this.dimColumnEvaluatorInfo = dimColumnEvaluatorInfo;
-    this.segmentProperties = segmentProperties;
-    dimColumnExecuterInfo = new DimColumnExecuterFilterInfo();
-    FilterUtil.prepareKeysFromSurrogates(dimColumnEvaluatorInfo.getFilterValues(),
-        segmentProperties.getDimensionKeyGenerator(), dimColumnEvaluatorInfo.getDimension(),
-        dimColumnExecuterInfo);
-
-  }
-
-  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder) {
-    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
-        .get(dimColumnEvaluatorInfo.getColumnIndex());
-    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
-      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
-          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
-    }
-    return getFilteredIndexes(blockChunkHolder.getDimensionDataChunk()[blockIndex],
-        blockChunkHolder.getDataBlock().nodeSize());
-  }
-
-  protected BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
-      int numerOfRows) {
-    if (dimensionColumnDataChunk.getAttributes().isNoDictionary()
-        && dimensionColumnDataChunk instanceof VariableLengthDimensionDataChunk) {
-      return setDirectKeyFilterIndexToBitSet(
-          (VariableLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows);
-    } else if (null != dimensionColumnDataChunk.getAttributes().getInvertedIndexes()
-        && dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-      return setFilterdIndexToBitSetWithColumnIndex(
-          (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows);
-    }
-
-    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows);
-  }
-
-  private BitSet setDirectKeyFilterIndexToBitSet(
-      VariableLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    List<byte[]> listOfColumnarKeyBlockDataForNoDictionaryVals =
-        dimensionColumnDataChunk.getCompleteDataChunk();
-    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-    int[] columnIndexArray = dimensionColumnDataChunk.getAttributes().getInvertedIndexes();
-    int[] columnReverseIndexArray =
-        dimensionColumnDataChunk.getAttributes().getInvertedIndexesReverse();
-    for (int i = 0; i < filterValues.length; i++) {
-      byte[] filterVal = filterValues[i];
-      if (null != listOfColumnarKeyBlockDataForNoDictionaryVals) {
-        if (null != columnIndexArray) {
-          for (int index : columnIndexArray) {
-            byte[] noDictionaryVal =
-                listOfColumnarKeyBlockDataForNoDictionaryVals.get(columnReverseIndexArray[index]);
-            if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterVal, noDictionaryVal) == 0) {
-              bitSet.set(index);
-            }
-          }
-        } else if (null != columnReverseIndexArray) {
-          for (int index : columnReverseIndexArray) {
-            byte[] noDictionaryVal =
-                listOfColumnarKeyBlockDataForNoDictionaryVals.get(columnReverseIndexArray[index]);
-            if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterVal, noDictionaryVal) == 0) {
-              bitSet.set(index);
-            }
-          }
-        } else {
-          for (int index = 0;
-               index < listOfColumnarKeyBlockDataForNoDictionaryVals.size(); index++) {
-            if (ByteUtil.UnsafeComparer.INSTANCE
-                .compareTo(filterVal, listOfColumnarKeyBlockDataForNoDictionaryVals.get(index))
-                == 0) {
-              bitSet.set(index);
-            }
-          }
-        }
-      }
-    }
-    return bitSet;
-
-  }
-
-  private BitSet setFilterdIndexToBitSetWithColumnIndex(
-      FixedLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    int[] columnIndex = dimensionColumnDataChunk.getAttributes().getInvertedIndexes();
-    int start = 0;
-    int last = 0;
-    int startIndex = 0;
-    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-    for (int i = 0; i < filterValues.length; i++) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              filterValues[i], false);
-      if (start < 0) {
-        continue;
-      }
-      bitSet.set(columnIndex[start]);
-      last = start;
-      for (int j = start + 1; j < numerOfRows; j++) {
-        if (ByteUtil.UnsafeComparer.INSTANCE
-            .compareTo(dimensionColumnDataChunk.getCompleteDataChunk(), j * filterValues[i].length,
-                filterValues[i].length, filterValues[i], 0, filterValues[i].length) == 0) {
-          bitSet.set(columnIndex[j]);
-          last++;
-        } else {
-          break;
-        }
-      }
-      startIndex = last;
-      if (startIndex >= numerOfRows) {
-        break;
-      }
-    }
-    return bitSet;
-  }
-
-  private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-      FixedLengthDimensionDataChunk fixedDimensionChunk =
-          (FixedLengthDimensionDataChunk) dimensionColumnDataChunk;
-      byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-      for (int k = 0; k < filterValues.length; k++) {
-        for (int j = 0; j < numerOfRows; j++) {
-          if (ByteUtil.UnsafeComparer.INSTANCE
-              .compareTo(fixedDimensionChunk.getCompleteDataChunk(), j * filterValues[k].length,
-                  filterValues[k].length, filterValues[k], 0, filterValues[k].length) == 0) {
-            bitSet.set(j);
-          }
-        }
-      }
-    }
-    return bitSet;
-  }
-
-  public BitSet isScanRequired(byte[][] blkMaxVal, byte[][] blkMinVal) {
-    BitSet bitSet = new BitSet(1);
-    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-    int columnIndex = dimColumnEvaluatorInfo.getColumnIndex();
-    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping().get(columnIndex);
-
-    boolean isScanRequired = false;
-    for (int k = 0; k < filterValues.length; k++) {
-      // filter value should be in range of max and min value i.e
-      // max>filtervalue>min
-      // so filter-max should be negative
-      int maxCompare =
-          ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blkMaxVal[blockIndex]);
-      // and filter-min should be positive
-      int minCompare =
-          ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blkMinVal[blockIndex]);
-
-      // if any filter value is in range then this block needs to be
-      // scanned
-      if (maxCompare <= 0 && minCompare >= 0) {
-        isScanRequired = true;
-        break;
-      }
-    }
-    if (isScanRequired) {
-      bitSet.set(0);
-    }
-    return bitSet;
-  }
-
-}
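
The core of setFilterdIndexToBitSetWithColumnIndex above, modelled on int values rather
than byte[] keys: binary-search the first occurrence of each filter key in the sorted
chunk data, extend forward while values still match, and set the original row ids taken
from the inverted index:

    import java.util.Arrays;
    import java.util.BitSet;

    public class SortedIncludeScanExample {
      public static BitSet filteredRows(int[] sortedValues, int[] invertedIndex,
          int[] filterKeys) {
        BitSet bits = new BitSet(sortedValues.length);
        for (int key : filterKeys) {
          int pos = Arrays.binarySearch(sortedValues, key);
          if (pos < 0) {
            continue;  // key not present in this chunk
          }
          while (pos > 0 && sortedValues[pos - 1] == key) {
            pos--;     // rewind to the first occurrence
          }
          for (int j = pos; j < sortedValues.length && sortedValues[j] == key; j++) {
            bits.set(invertedIndex[j]);  // map sorted position back to actual row id
          }
        }
        return bits;
      }
    }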

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/OrFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/OrFilterExecuterImpl.java b/core/src/main/java/org/carbondata/scan/filter/executer/OrFilterExecuterImpl.java
deleted file mode 100644
index 38938e6..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/OrFilterExecuterImpl.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import java.util.BitSet;
-
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-public class OrFilterExecuterImpl implements FilterExecuter {
-
-  private FilterExecuter leftExecuter;
-  private FilterExecuter rightExecuter;
-
-  public OrFilterExecuterImpl(FilterExecuter leftExecuter, FilterExecuter rightExecuter) {
-    this.leftExecuter = leftExecuter;
-    this.rightExecuter = rightExecuter;
-  }
-
-  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
-      throws FilterUnsupportedException {
-    BitSet leftFilters = leftExecuter.applyFilter(blockChunkHolder);
-    BitSet rightFilters = rightExecuter.applyFilter(blockChunkHolder);
-    leftFilters.or(rightFilters);
-
-    return leftFilters;
-  }
-
-  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
-    BitSet leftFilters = leftExecuter.isScanRequired(blockMaxValue, blockMinValue);
-    BitSet rightFilters = rightExecuter.isScanRequired(blockMaxValue, blockMinValue);
-    leftFilters.or(rightFilters);
-    return leftFilters;
-  }
-
-}
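
The OR combination reduces to BitSet.or, as a tiny standalone example:

    import java.util.BitSet;

    public class OrCombineExample {
      public static void main(String[] args) {
        BitSet left = new BitSet(8);
        left.set(1);
        left.set(4);
        BitSet right = new BitSet(8);
        right.set(4);
        right.set(6);
        left.or(right);            // rows matching either child filter
        System.out.println(left);  // {1, 4, 6}
      }
    }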

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/RestructureFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/RestructureFilterExecuterImpl.java b/core/src/main/java/org/carbondata/scan/filter/executer/RestructureFilterExecuterImpl.java
deleted file mode 100644
index 70a6ff0..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/RestructureFilterExecuterImpl.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import java.util.BitSet;
-
-import org.carbondata.core.keygenerator.KeyGenerator;
-import org.carbondata.scan.filter.FilterUtil;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-
-public class RestructureFilterExecuterImpl implements FilterExecuter {
-
-  DimColumnExecuterFilterInfo dimColumnExecuterInfo;
-
-  public RestructureFilterExecuterImpl(DimColumnResolvedFilterInfo dimColumnResolvedFilterInfo,
-      KeyGenerator blockKeyGenerator) {
-    dimColumnExecuterInfo = new DimColumnExecuterFilterInfo();
-    FilterUtil
-        .prepareKeysFromSurrogates(dimColumnResolvedFilterInfo.getFilterValues(), blockKeyGenerator,
-            dimColumnResolvedFilterInfo.getDimension(), dimColumnExecuterInfo);
-  }
-
-  @Override public BitSet applyFilter(BlocksChunkHolder blocksChunkHolder) {
-    BitSet bitSet = new BitSet(blocksChunkHolder.getDataBlock().nodeSize());
-    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
-    if (null != filterValues && filterValues.length > 0) {
-      bitSet.set(0, blocksChunkHolder.getDataBlock().nodeSize());
-    }
-    return bitSet;
-  }
-
-  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
-    BitSet bitSet = new BitSet(1);
-    bitSet.set(0);
-    return bitSet;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelFilterExecuterImpl.java b/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelFilterExecuterImpl.java
deleted file mode 100644
index ec4ede2..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelFilterExecuterImpl.java
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.List;
-import java.util.Map;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.cache.dictionary.Dictionary;
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.datastore.chunk.impl.VariableLengthDimensionDataChunk;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.keygenerator.KeyGenException;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.core.util.DataTypeUtil;
-import org.carbondata.scan.executor.exception.QueryExecutionException;
-import org.carbondata.scan.executor.infos.KeyStructureInfo;
-import org.carbondata.scan.executor.util.QueryUtil;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.exception.FilterIllegalMemberException;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.FilterUtil;
-import org.carbondata.scan.filter.GenericQueryType;
-import org.carbondata.scan.filter.intf.RowImpl;
-import org.carbondata.scan.filter.intf.RowIntf;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-import org.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-public class RowLevelFilterExecuterImpl implements FilterExecuter {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(RowLevelFilterExecuterImpl.class.getName());
-  protected List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList;
-  protected List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList;
-  protected Expression exp;
-  protected AbsoluteTableIdentifier tableIdentifier;
-  protected SegmentProperties segmentProperties;
-  /**
-   * it has index at which given dimension is stored in file
-   */
-  private int[] blocksIndex;
-
-  private Map<Integer, GenericQueryType> complexDimensionInfoMap;
-
-  public RowLevelFilterExecuterImpl(List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
-      List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
-      AbsoluteTableIdentifier tableIdentifier, SegmentProperties segmentProperties,
-      Map<Integer, GenericQueryType> complexDimensionInfoMap) {
-    this.dimColEvaluatorInfoList = dimColEvaluatorInfoList;
-    this.segmentProperties = segmentProperties;
-    this.blocksIndex = new int[dimColEvaluatorInfoList.size()];
-    for (int i = 0; i < dimColEvaluatorInfoList.size(); i++) {
-      this.blocksIndex[i] = segmentProperties.getDimensionOrdinalToBlockMapping()
-          .get(dimColEvaluatorInfoList.get(i).getColumnIndex());
-    }
-    if (null == msrColEvalutorInfoList) {
-      this.msrColEvalutorInfoList = new ArrayList<MeasureColumnResolvedFilterInfo>(20);
-    } else {
-      this.msrColEvalutorInfoList = msrColEvalutorInfoList;
-    }
-    this.exp = exp;
-    this.tableIdentifier = tableIdentifier;
-    this.complexDimensionInfoMap = complexDimensionInfoMap;
-  }
-
-  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
-      throws FilterUnsupportedException {
-    for (int i = 0; i < dimColEvaluatorInfoList.size(); i++) {
-      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo = dimColEvaluatorInfoList.get(i);
-      if (dimColumnEvaluatorInfo.getDimension().getDataType() != DataType.ARRAY
-          && dimColumnEvaluatorInfo.getDimension().getDataType() != DataType.STRUCT) {
-        if (null == blockChunkHolder.getDimensionDataChunk()[blocksIndex[i]]) {
-          blockChunkHolder.getDimensionDataChunk()[blocksIndex[i]] = blockChunkHolder.getDataBlock()
-              .getDimensionChunk(blockChunkHolder.getFileReader(), blocksIndex[i]);
-        }
-      } else {
-        GenericQueryType complexType = complexDimensionInfoMap.get(blocksIndex[i]);
-        complexType.fillRequiredBlockData(blockChunkHolder);
-      }
-    }
-
-    // CHECKSTYLE:OFF Approval No:Approval-V1R2C10_001
-    if (null != msrColEvalutorInfoList) {
-      for (MeasureColumnResolvedFilterInfo msrColumnEvalutorInfo : msrColEvalutorInfoList) {
-        if (msrColumnEvalutorInfo.isMeasureExistsInCurrentSlice() && null == blockChunkHolder
-            .getMeasureDataChunk()[msrColumnEvalutorInfo.getColumnIndex()]) {
-          blockChunkHolder.getMeasureDataChunk()[msrColumnEvalutorInfo.getColumnIndex()] =
-              blockChunkHolder.getDataBlock().getMeasureChunk(blockChunkHolder.getFileReader(),
-                  msrColumnEvalutorInfo.getColumnIndex());
-        }
-      }
-    }
-    // CHECKSTYLE:ON
-
-    int numberOfRows = blockChunkHolder.getDataBlock().nodeSize();
-    BitSet set = new BitSet(numberOfRows);
-    RowIntf row = new RowImpl();
-    boolean invalidRowsPresent = false;
-    for (int index = 0; index < numberOfRows; index++) {
-      try {
-        createRow(blockChunkHolder, row, index);
-      } catch (QueryExecutionException e) {
-        FilterUtil.logError(e, invalidRowsPresent);
-      }
-      Boolean rslt = false;
-      try {
-        rslt = exp.evaluate(row).getBoolean();
-      }
-      // Any invalid member encountered during evaluation is ignored. Since the
-      // evaluation happens for every row, the error is logged only once to
-      // avoid flooding the log with the same information.
-      catch (FilterIllegalMemberException e) {
-        FilterUtil.logError(e, invalidRowsPresent);
-      }
-      if (null != rslt && rslt) {
-        set.set(index);
-      }
-    }
-    return set;
-  }
-
-  /**
-   * Method will read the members of particular dimension block and create
-   * a row instance for further processing of the filters
-   *
-   * @param blockChunkHolder
-   * @param row
-   * @param index
-   * @throws QueryExecutionException
-   */
-  private void createRow(BlocksChunkHolder blockChunkHolder, RowIntf row, int index)
-      throws QueryExecutionException {
-    Object[] record = new Object[dimColEvaluatorInfoList.size() + msrColEvalutorInfoList.size()];
-    String memberString = null;
-    for (int i = 0; i < dimColEvaluatorInfoList.size(); i++) {
-      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo = dimColEvaluatorInfoList.get(i);
-      if (dimColumnEvaluatorInfo.getDimension().getDataType() != DataType.ARRAY
-          && dimColumnEvaluatorInfo.getDimension().getDataType() != DataType.STRUCT) {
-        if (!dimColumnEvaluatorInfo.isDimensionExistsInCurrentSilce()) {
-          record[dimColumnEvaluatorInfo.getRowIndex()] = dimColumnEvaluatorInfo.getDefaultValue();
-        }
-        if (!dimColumnEvaluatorInfo.getDimension().hasEncoding(Encoding.DICTIONARY)
-            && blockChunkHolder
-            .getDimensionDataChunk()[blocksIndex[i]] instanceof VariableLengthDimensionDataChunk) {
-
-          VariableLengthDimensionDataChunk dimensionColumnDataChunk =
-              (VariableLengthDimensionDataChunk) blockChunkHolder
-                  .getDimensionDataChunk()[blocksIndex[i]];
-          if (null != dimensionColumnDataChunk.getCompleteDataChunk()) {
-            memberString =
-                readMemberBasedOnNoDictionaryVal(dimColumnEvaluatorInfo, dimensionColumnDataChunk,
-                    index);
-            if (null != memberString) {
-              if (memberString.equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
-                memberString = null;
-              }
-            }
-            record[dimColumnEvaluatorInfo.getRowIndex()] = DataTypeUtil
-                .getDataBasedOnDataType(memberString,
-                    dimColumnEvaluatorInfo.getDimension().getDataType());
-          } else {
-            continue;
-          }
-        } else {
-          int dictionaryValue =
-              readSurrogatesFromColumnBlock(blockChunkHolder, index, dimColumnEvaluatorInfo,
-                  blocksIndex[i]);
-          Dictionary forwardDictionary = null;
-          if (dimColumnEvaluatorInfo.getDimension().hasEncoding(Encoding.DICTIONARY)
-              && !dimColumnEvaluatorInfo.getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
-            memberString =
-                getFilterActualValueFromDictionaryValue(dimColumnEvaluatorInfo, dictionaryValue,
-                    forwardDictionary);
-            record[dimColumnEvaluatorInfo.getRowIndex()] = DataTypeUtil
-                .getDataBasedOnDataType(memberString,
-                    dimColumnEvaluatorInfo.getDimension().getDataType());
-          } else if (dimColumnEvaluatorInfo.getDimension()
-              .hasEncoding(Encoding.DIRECT_DICTIONARY)) {
-
-            Object member = getFilterActualValueFromDirectDictionaryValue(dimColumnEvaluatorInfo,
-                dictionaryValue);
-            record[dimColumnEvaluatorInfo.getRowIndex()] = member;
-          }
-        }
-      } else {
-        try {
-          GenericQueryType complexType = complexDimensionInfoMap.get(blocksIndex[i]);
-          ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
-          DataOutputStream dataOutputStream = new DataOutputStream(byteStream);
-          complexType
-              .parseBlocksAndReturnComplexColumnByteArray(blockChunkHolder.getDimensionDataChunk(),
-                  index, dataOutputStream);
-          record[dimColumnEvaluatorInfo.getRowIndex()] = complexType
-              .getDataBasedOnDataTypeFromSurrogates(ByteBuffer.wrap(byteStream.toByteArray()));
-          byteStream.close();
-        } catch (IOException e) {
-          LOGGER.info(e.getMessage());
-        }
-      }
-    }
-
-    DataType msrType;
-
-    for (MeasureColumnResolvedFilterInfo msrColumnEvalutorInfo : msrColEvalutorInfoList) {
-      switch (msrColumnEvalutorInfo.getType()) {
-        case INT:
-        case LONG:
-          msrType = DataType.LONG;
-          break;
-        case DECIMAL:
-          msrType = DataType.DECIMAL;
-          break;
-        default:
-          msrType = DataType.DOUBLE;
-      }
-      // if the measure doesn't exist in the current slice then set the default value.
-      if (!msrColumnEvalutorInfo.isMeasureExistsInCurrentSlice()) {
-        record[msrColumnEvalutorInfo.getRowIndex()] = msrColumnEvalutorInfo.getDefaultValue();
-      } else {
-        Object msrValue;
-        switch (msrType) {
-          case INT:
-          case LONG:
-            msrValue =
-                blockChunkHolder.getMeasureDataChunk()[msrColumnEvalutorInfo.getColumnIndex()]
-                    .getMeasureDataHolder().getReadableLongValueByIndex(index);
-            break;
-          case DECIMAL:
-            msrValue =
-                blockChunkHolder.getMeasureDataChunk()[msrColumnEvalutorInfo.getColumnIndex()]
-                    .getMeasureDataHolder().getReadableBigDecimalValueByIndex(index);
-            break;
-          default:
-            msrValue =
-                blockChunkHolder.getMeasureDataChunk()[msrColumnEvalutorInfo.getColumnIndex()]
-                    .getMeasureDataHolder().getReadableDoubleValueByIndex(index);
-        }
-        record[msrColumnEvalutorInfo.getRowIndex()] =
-            blockChunkHolder.getMeasureDataChunk()[msrColumnEvalutorInfo.getColumnIndex()]
-                .getNullValueIndexHolder().getBitSet().get(index) ? null : msrValue;
-
-      }
-    }
-    row.setValues(record);
-  }
-
-  /**
-   * Method will read the actual member value from the direct dictionary
-   * generator by passing the direct dictionary (surrogate) value.
-   *
-   * @param dimColumnEvaluatorInfo
-   * @param dictionaryValue
-   * @return
-   */
-  private Object getFilterActualValueFromDirectDictionaryValue(
-      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo, int dictionaryValue) {
-    Object memberString = null;
-    DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
-        .getDirectDictionaryGenerator(dimColumnEvaluatorInfo.getDimension().getDataType());
-    if (null != directDictionaryGenerator) {
-      memberString = directDictionaryGenerator.getValueFromSurrogate(dictionaryValue);
-    }
-    return memberString;
-  }
-
-  /**
-   * Read the actual filter member by passing the dictionary value to
-   * the forward dictionary cache, which holds a column-wise cache.
-   *
-   * @param dimColumnEvaluatorInfo
-   * @param dictionaryValue
-   * @param forwardDictionary
-   * @return
-   * @throws QueryExecutionException
-   */
-  private String getFilterActualValueFromDictionaryValue(
-      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo, int dictionaryValue,
-      Dictionary forwardDictionary) throws QueryExecutionException {
-    String memberString;
-    try {
-      forwardDictionary = FilterUtil
-          .getForwardDictionaryCache(tableIdentifier, dimColumnEvaluatorInfo.getDimension());
-    } catch (QueryExecutionException e) {
-      throw new QueryExecutionException(e);
-    }
-
-    memberString = forwardDictionary.getDictionaryValueForKey(dictionaryValue);
-    if (null != memberString) {
-      if (memberString.equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
-        memberString = null;
-      }
-    }
-    return memberString;
-  }
-
-  /**
-   * Read the filter member dictionary data from the block corresponding to
-   * the applied filter column.
-   *
-   * @param blockChunkHolder
-   * @param index
-   * @param dimColumnEvaluatorInfo
-   * @return
-   */
-  private int readSurrogatesFromColumnBlock(BlocksChunkHolder blockChunkHolder, int index,
-      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo, int blockIndex) {
-    if (dimColumnEvaluatorInfo.getDimension().isColumnar()) {
-      byte[] rawData = blockChunkHolder.getDimensionDataChunk()[blockIndex].getChunkData(index);
-      ByteBuffer byteBuffer = ByteBuffer.allocate(CarbonCommonConstants.INT_SIZE_IN_BYTE);
-      int dictionaryValue = CarbonUtil.getSurrogateKey(rawData, byteBuffer);
-      return dictionaryValue;
-    } else {
-      return readSurrogatesFromColumnGroupBlock(blockChunkHolder, index, dimColumnEvaluatorInfo,
-          blockIndex);
-    }
-
-  }
-
-  /**
-   * @param blockChunkHolder
-   * @param index
-   * @param dimColumnEvaluatorInfo
-   * @return the surrogate key of the given row for the given column group dimension
-   */
-  private int readSurrogatesFromColumnGroupBlock(BlocksChunkHolder blockChunkHolder, int index,
-      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo, int blockIndex) {
-    try {
-      KeyStructureInfo keyStructureInfo =
-          QueryUtil.getKeyStructureInfo(segmentProperties, dimColumnEvaluatorInfo);
-      byte[] colData = blockChunkHolder.getDimensionDataChunk()[blockIndex].getChunkData(index);
-      long[] result = keyStructureInfo.getKeyGenerator().getKeyArray(colData);
-      int colGroupId =
-          QueryUtil.getColumnGroupId(segmentProperties, dimColumnEvaluatorInfo.getColumnIndex());
-      int dictionaryValue = (int) result[segmentProperties
-          .getColumnGroupMdKeyOrdinal(colGroupId, dimColumnEvaluatorInfo.getColumnIndex())];
-      return dictionaryValue;
-    } catch (KeyGenException e) {
-      LOGGER.error(e);
-    }
-    return 0;
-  }
-
-  /**
-   * Reads the blocks for no-dictionary data; in the no-dictionary case the
-   * filter data is read directly, so there is no need to scan the dictionary
-   * or look up a dictionary value.
-   *
-   * @param dimColumnEvaluatorInfo
-   * @param dimensionColumnDataChunk
-   * @param index
-   * @return
-   */
-  private String readMemberBasedOnNoDictionaryVal(
-      DimColumnResolvedFilterInfo dimColumnEvaluatorInfo,
-      VariableLengthDimensionDataChunk dimensionColumnDataChunk, int index) {
-    byte[] noDictionaryVals;
-    if (null != dimensionColumnDataChunk.getAttributes().getInvertedIndexesReverse()) {
-      // Getting the data for direct surrogates.
-      noDictionaryVals = dimensionColumnDataChunk.getCompleteDataChunk()
-          .get(dimensionColumnDataChunk.getAttributes().getInvertedIndexesReverse()[index]);
-    } else {
-      noDictionaryVals = dimensionColumnDataChunk.getCompleteDataChunk().get(index);
-    }
-    return new String(noDictionaryVals, Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
-  }
-
-  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
-    BitSet bitSet = new BitSet(1);
-    bitSet.set(0);
-    return bitSet;
-  }
-}

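A note for readers tracing the removed row-level evaluator above: decoding a dictionary column for filtering boils down to a surrogate-key lookup plus a null-sentinel check (the MEMBER_DEFAULT_VAL comparison). The following is a minimal, self-contained sketch of that shape only; the HashMap and the sentinel string are stand-ins, not the CarbonData Dictionary API:

import java.util.HashMap;
import java.util.Map;

public class SurrogateDecodeSketch {
  // stand-in for CarbonCommonConstants.MEMBER_DEFAULT_VAL (illustrative value)
  static final String MEMBER_DEFAULT_VAL = "@NU#LL$!";

  public static void main(String[] args) {
    // map-backed stand-in for the forward dictionary cache
    Map<Integer, String> forwardDictionary = new HashMap<>();
    forwardDictionary.put(1, MEMBER_DEFAULT_VAL); // lowest surrogate encodes null
    forwardDictionary.put(2, "india");
    forwardDictionary.put(3, "usa");

    for (int surrogate = 1; surrogate <= 3; surrogate++) {
      String member = forwardDictionary.get(surrogate);
      if (MEMBER_DEFAULT_VAL.equals(member)) {
        member = null; // the default member decodes to SQL null
      }
      System.out.println(surrogate + " -> " + member);
    }
  }
}
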
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java b/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
deleted file mode 100644
index 5c22566..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import java.util.BitSet;
-import java.util.List;
-
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.util.ByteUtil;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-import org.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-public class RowLevelRangeGrtThanFiterExecuterImpl extends RowLevelFilterExecuterImpl {
-  private byte[][] filterRangeValues;
-
-  public RowLevelRangeGrtThanFiterExecuterImpl(
-      List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
-      List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
-      AbsoluteTableIdentifier tableIdentifier, byte[][] filterRangeValues,
-      SegmentProperties segmentProperties) {
-    super(dimColEvaluatorInfoList, msrColEvalutorInfoList, exp, tableIdentifier, segmentProperties,
-        null);
-    this.filterRangeValues = filterRangeValues;
-  }
-
-  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
-    BitSet bitSet = new BitSet(1);
-    byte[][] filterValues = this.filterRangeValues;
-    int columnIndex = this.dimColEvaluatorInfoList.get(0).getColumnIndex();
-    boolean isScanRequired = false;
-    for (int k = 0; k < filterValues.length; k++) {
-      // the filter value should be within the block's min/max range, i.e.
-      // max > filtervalue > min,
-      // so (filter - max) should be negative
-      int maxCompare =
-          ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blockMaxValue[columnIndex]);
-      // if any filter value is in range then this block needs to be
-      // scanned, i.e. the filter value is strictly less than the block max.
-      if (maxCompare < 0) {
-        isScanRequired = true;
-        break;
-      }
-    }
-    if (isScanRequired) {
-      bitSet.set(0);
-    }
-    return bitSet;
-
-  }
-
-  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
-      throws FilterUnsupportedException {
-    if (!dimColEvaluatorInfoList.get(0).getDimension().hasEncoding(Encoding.DICTIONARY)) {
-      return super.applyFilter(blockChunkHolder);
-    }
-    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
-        .get(dimColEvaluatorInfoList.get(0).getColumnIndex());
-    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
-      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
-          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
-    }
-    return getFilteredIndexes(blockChunkHolder.getDimensionDataChunk()[blockIndex],
-        blockChunkHolder.getDataBlock().nodeSize());
-  }
-
-  private BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
-      int numerOfRows) {
-    if (null != dimensionColumnDataChunk.getAttributes().getInvertedIndexes()
-        && dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-      return setFilterdIndexToBitSetWithColumnIndex(
-          (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows);
-    }
-    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows);
-  }
-
-  /**
-   * Method will scan the block and find the range start index from which all members
-   * will be considered for applying range filters. This method will be called if the
-   * column is not sorted in its default row order, so a column index mapping
-   * (inverted index) will be present for accessing the members from the block.
-   *
-   * @param dimensionColumnDataChunk
-   * @param numerOfRows
-   * @return BitSet.
-   */
-  private BitSet setFilterdIndexToBitSetWithColumnIndex(
-      FixedLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    int[] columnIndex = dimensionColumnDataChunk.getAttributes().getInvertedIndexes();
-    int start = 0;
-    int last = 0;
-    int startIndex = 0;
-    byte[][] filterValues = this.filterRangeValues;
-    for (int i = 0; i < filterValues.length; i++) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              filterValues[i], true);
-      if (start >= 0) {
-        start = CarbonUtil.nextGreaterValueToTarget(start,
-            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, filterValues[i], numerOfRows);
-      }
-      // Handles the case where the range filter member is not present in the block:
-      // the binary search returns the index from which the bits will be set in order
-      // to apply the filter. This is a greater-than filter, so the range starts
-      // from the next element that is greater than the filter member.
-      if (start < 0) {
-        start = -(start + 1);
-        if (start == numerOfRows) {
-          start = start - 1;
-        }
-        // Compare the member at the tentative index (the binary-search insertion
-        // point) with the filter member; bits are set starting from the first
-        // member strictly greater than the filter value.
-        if (ByteUtil
-            .compare(filterValues[i], dimensionColumnDataChunk.getChunkData(columnIndex[start]))
-            > 0) {
-          start = start + 1;
-        }
-      }
-
-      last = start;
-      for (int j = start; j < numerOfRows; j++) {
-        bitSet.set(columnIndex[j]);
-        last++;
-      }
-      startIndex = last;
-      if (startIndex >= numerOfRows) {
-        break;
-      }
-    }
-
-    return bitSet;
-  }
-
-  /**
-   * Method will scan the block and find the range start index from which all
-   * members will be considered for applying range filters. This method will
-   * be called if the column is sorted in its default row order, so no column
-   * index mapping is needed for accessing the members from the block.
-   *
-   * @param dimensionColumnDataChunk
-   * @param numerOfRows
-   * @return BitSet.
-   */
-  private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-      int start = 0;
-      int last = 0;
-      int startIndex = 0;
-      byte[][] filterValues = this.filterRangeValues;
-      for (int k = 0; k < filterValues.length; k++) {
-        start = CarbonUtil.getFirstIndexUsingBinarySearch(
-            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-            filterValues[k], true);
-        start = CarbonUtil.nextGreaterValueToTarget(start,
-            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, filterValues[k], numerOfRows);
-        if (start < 0) {
-          start = -(start + 1);
-          if (start == numerOfRows) {
-            start = start - 1;
-          }
-          // Compare the member at the tentative index (the binary-search insertion
-          // point) with the filter member; bits are set starting from the first
-          // member strictly greater than the filter value.
-          if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start)) > 0) {
-            start = start + 1;
-          }
-        }
-        last = start;
-        for (int j = start; j < numerOfRows; j++) {
-          bitSet.set(j);
-          last++;
-        }
-        startIndex = last;
-        if (startIndex >= numerOfRows) {
-          break;
-        }
-      }
-    }
-    return bitSet;
-  }
-
-}

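The isScanRequired implementation above is the block-pruning half of the greater-than executor: a block can be skipped entirely unless some filter value is strictly less than the block's max. Below is a hedged sketch of that check, with a plain unsigned lexicographic comparison standing in for ByteUtil.UnsafeComparer; the class and values are illustrative only:

public class GreaterThanPruneSketch {
  // unsigned lexicographic byte comparison, shortest array wins ties
  static int compareUnsigned(byte[] a, byte[] b) {
    int len = Math.min(a.length, b.length);
    for (int i = 0; i < len; i++) {
      int cmp = (a[i] & 0xff) - (b[i] & 0xff);
      if (cmp != 0) return cmp;
    }
    return a.length - b.length;
  }

  static boolean isScanRequired(byte[][] filterValues, byte[] blockMax) {
    for (byte[] filter : filterValues) {
      if (compareUnsigned(filter, blockMax) < 0) {
        return true; // some row in the block can still exceed the filter
      }
    }
    return false; // no filter value below block max: skip the whole block
  }

  public static void main(String[] args) {
    byte[] blockMax = {5, 0};
    System.out.println(isScanRequired(new byte[][] {{4, 9}}, blockMax)); // true
    System.out.println(isScanRequired(new byte[][] {{6, 0}}, blockMax)); // false
  }
}
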
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java b/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
deleted file mode 100644
index 0d857d5..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import java.util.BitSet;
-import java.util.List;
-
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.util.ByteUtil;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-import org.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-public class RowLevelRangeGrtrThanEquaToFilterExecuterImpl extends RowLevelFilterExecuterImpl {
-
-  protected byte[][] filterRangeValues;
-
-  public RowLevelRangeGrtrThanEquaToFilterExecuterImpl(
-      List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
-      List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
-      AbsoluteTableIdentifier tableIdentifier, byte[][] filterRangeValues,
-      SegmentProperties segmentProperties) {
-    super(dimColEvaluatorInfoList, msrColEvalutorInfoList, exp, tableIdentifier, segmentProperties,
-        null);
-    this.filterRangeValues = filterRangeValues;
-  }
-
-  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
-    BitSet bitSet = new BitSet(1);
-    byte[][] filterValues = this.filterRangeValues;
-    int columnIndex = this.dimColEvaluatorInfoList.get(0).getColumnIndex();
-    boolean isScanRequired = false;
-    for (int k = 0; k < filterValues.length; k++) {
-      // the filter value should be within the block's min/max range, i.e.
-      // max >= filtervalue,
-      // so (filter - max) should be non-positive
-      int maxCompare =
-          ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blockMaxValue[columnIndex]);
-      // if any filter value is in range then this block needs to be
-      // scanned, i.e. the filter value is less than or equal to the block max.
-      if (maxCompare <= 0) {
-        isScanRequired = true;
-        break;
-      }
-    }
-    if (isScanRequired) {
-      bitSet.set(0);
-    }
-    return bitSet;
-
-  }
-
-  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
-      throws FilterUnsupportedException {
-    if (!dimColEvaluatorInfoList.get(0).getDimension().hasEncoding(Encoding.DICTIONARY)) {
-      return super.applyFilter(blockChunkHolder);
-    }
-    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
-        .get(dimColEvaluatorInfoList.get(0).getColumnIndex());
-    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
-      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
-          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
-    }
-    return getFilteredIndexes(blockChunkHolder.getDimensionDataChunk()[blockIndex],
-        blockChunkHolder.getDataBlock().nodeSize());
-  }
-
-  private BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
-      int numerOfRows) {
-    if (null != dimensionColumnDataChunk.getAttributes().getInvertedIndexes()
-        && dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-      return setFilterdIndexToBitSetWithColumnIndex(
-          (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows);
-    }
-    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows);
-  }
-
-  /**
-   * Method will scan the block and find the range start index from which all members
-   * will be considered for applying range filters. This method will be called if the
-   * column is not sorted in its default row order, so a column index mapping
-   * (inverted index) will be present for accessing the members from the block.
-   *
-   * @param dimensionColumnDataChunk
-   * @param numerOfRows
-   * @return BitSet.
-   */
-  private BitSet setFilterdIndexToBitSetWithColumnIndex(
-      FixedLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    int[] columnIndex = dimensionColumnDataChunk.getAttributes().getInvertedIndexes();
-    int start = 0;
-    int last = 0;
-    int startIndex = 0;
-    byte[][] filterValues = this.filterRangeValues;
-    for (int i = 0; i < filterValues.length; i++) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              filterValues[i], false);
-      if (start < 0) {
-        start = -(start + 1);
-        if (start == numerOfRows) {
-          start = start - 1;
-        }
-        // Compare the member at the tentative index (the binary-search insertion
-        // point) with the filter member; bits are set starting from the first
-        // member greater than or equal to the filter value.
-        if (ByteUtil
-            .compare(filterValues[i], dimensionColumnDataChunk.getChunkData(columnIndex[start]))
-            >= 0) {
-          start = start + 1;
-        }
-      }
-      last = start;
-      for (int j = start; j < numerOfRows; j++) {
-
-        bitSet.set(columnIndex[j]);
-        last++;
-      }
-      startIndex = last;
-      if (startIndex >= numerOfRows) {
-        break;
-      }
-    }
-    return bitSet;
-  }
-
-  /**
-   * Method will scan the block and find the range start index from which all
-   * members will be considered for applying range filters. This method will
-   * be called if the column is sorted in its default row order, so no column
-   * index mapping is needed for accessing the members from the block.
-   *
-   * @param dimensionColumnDataChunk
-   * @param numerOfRows
-   * @return BitSet.
-   */
-  private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-      int start = 0;
-      int last = 0;
-      int startIndex = 0;
-      byte[][] filterValues = this.filterRangeValues;
-      for (int k = 0; k < filterValues.length; k++) {
-        start = CarbonUtil.getFirstIndexUsingBinarySearch(
-            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-            filterValues[k], false);
-        if (start < 0) {
-          start = -(start + 1);
-          if (start == numerOfRows) {
-            start = start - 1;
-          }
-          // Compare the member at the tentative index (the binary-search insertion
-          // point) with the filter member; bits are set starting from the first
-          // member greater than or equal to the filter value.
-          if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start))
-              >= 0) {
-            start = start + 1;
-          }
-        }
-
-        last = start;
-        for (int j = start; j < numerOfRows; j++) {
-          bitSet.set(j);
-          last++;
-        }
-        startIndex = last;
-        if (startIndex >= numerOfRows) {
-          break;
-        }
-      }
-    }
-    return bitSet;
-  }
-}

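The start = -(start + 1) arithmetic repeated in both executors above follows the JDK binary-search contract: on a miss the search returns -(insertionPoint) - 1, so negating and subtracting one recovers the first index whose member is >= the key. A minimal sketch with int keys in place of the byte[][] filter members:

import java.util.Arrays;

public class InsertionPointSketch {
  public static void main(String[] args) {
    int[] sorted = {10, 20, 30, 40};
    int result = Arrays.binarySearch(sorted, 25); // 25 absent, returns -(2) - 1 = -3
    int start = result >= 0 ? result : -(result + 1);
    // start == 2: rows from index 2 onward satisfy "value >= 25"
    System.out.println("range filter starts at index " + start);
  }
}
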
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java b/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
deleted file mode 100644
index b1ebf0a..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import java.util.BitSet;
-import java.util.List;
-
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
-import org.carbondata.core.util.ByteUtil;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.FilterUtil;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-import org.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-public class RowLevelRangeLessThanEqualFilterExecuterImpl extends RowLevelFilterExecuterImpl {
-  protected byte[][] filterRangeValues;
-
-  public RowLevelRangeLessThanEqualFilterExecuterImpl(
-      List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
-      List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
-      AbsoluteTableIdentifier tableIdentifier, byte[][] filterRangeValues,
-      SegmentProperties segmentProperties) {
-    super(dimColEvaluatorInfoList, msrColEvalutorInfoList, exp, tableIdentifier, segmentProperties,
-        null);
-    this.filterRangeValues = filterRangeValues;
-  }
-
-  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
-    BitSet bitSet = new BitSet(1);
-    byte[][] filterValues = this.filterRangeValues;
-    int columnIndex = this.dimColEvaluatorInfoList.get(0).getColumnIndex();
-    boolean isScanRequired = false;
-    for (int k = 0; k < filterValues.length; k++) {
-      // the block needs scanning when the filter value is >= the block min,
-      // i.e. (filter - min) should be non-negative
-      int minCompare =
-          ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blockMinValue[columnIndex]);
-
-      // since this is a less-than-or-equal filter, validate whether the
-      // block's min value is less than or equal to the applied filter member;
-      // only then can any row in the block satisfy the filter
-      if (minCompare >= 0) {
-        isScanRequired = true;
-        break;
-      }
-    }
-    if (isScanRequired) {
-      bitSet.set(0);
-    }
-    return bitSet;
-
-  }
-
-  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
-      throws FilterUnsupportedException {
-    if (!dimColEvaluatorInfoList.get(0).getDimension().hasEncoding(Encoding.DICTIONARY)) {
-      return super.applyFilter(blockChunkHolder);
-    }
-    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
-        .get(dimColEvaluatorInfoList.get(0).getColumnIndex());
-    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
-      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
-          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
-    }
-    return getFilteredIndexes(blockChunkHolder.getDimensionDataChunk()[blockIndex],
-        blockChunkHolder.getDataBlock().nodeSize());
-  }
-
-  private BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
-      int numerOfRows) {
-    byte[] defaultValue = null;
-    if (dimColEvaluatorInfoList.get(0).getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
-      DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
-          .getDirectDictionaryGenerator(
-              dimColEvaluatorInfoList.get(0).getDimension().getDataType());
-      int key = directDictionaryGenerator.generateDirectSurrogateKey(null) + 1;
-      defaultValue = FilterUtil.getMaskKey(key, dimColEvaluatorInfoList.get(0).getDimension(),
-          this.segmentProperties.getDimensionKeyGenerator());
-    }
-    if (null != dimensionColumnDataChunk.getAttributes().getInvertedIndexes()
-        && dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-
-      return setFilterdIndexToBitSetWithColumnIndex(
-          (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows, defaultValue);
-
-    }
-    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows, defaultValue);
-  }
-
-  /**
-   * Method will scan the block and find the range start index from which all members
-   * will be considered for applying range filters. This method will be called if the
-   * column is not sorted in its default row order, so a column index mapping
-   * (inverted index) will be present for accessing the members from the block.
-   *
-   * @param dimensionColumnDataChunk
-   * @param numerOfRows
-   * @return BitSet.
-   */
-  private BitSet setFilterdIndexToBitSetWithColumnIndex(
-      FixedLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows,
-      byte[] defaultValue) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    int[] columnIndex = dimensionColumnDataChunk.getAttributes().getInvertedIndexes();
-    int start = 0;
-    int last = 0;
-    int skip = 0;
-    int startIndex = 0;
-    byte[][] filterValues = this.filterRangeValues;
-    // find how many leading default (null) values to skip in the direct dictionary case
-    if (null != defaultValue) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              defaultValue, true);
-      if (start < 0) {
-        skip = -(start + 1);
-        // end of block
-        if (skip == numerOfRows) {
-          return bitSet;
-        }
-      } else {
-        skip = start;
-      }
-      startIndex = skip;
-    }
-    for (int i = 0; i < filterValues.length; i++) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              filterValues[i], true);
-      if (start < 0) {
-        start = -(start + 1);
-        if (start == numerOfRows) {
-          start = start - 1;
-        }
-        // Compare the member at the tentative index (the binary-search insertion
-        // point) with the filter member; if the member exceeds the filter, step
-        // back so bits are set only up to the last member <= the filter value.
-        if (ByteUtil
-            .compare(filterValues[i], dimensionColumnDataChunk.getChunkData(columnIndex[start]))
-            <= 0) {
-          start = start - 1;
-        }
-      }
-      last = start;
-      for (int j = start; j >= skip; j--) {
-        bitSet.set(columnIndex[j]);
-        last--;
-      }
-      startIndex = last;
-      if (startIndex <= 0) {
-        break;
-      }
-    }
-    return bitSet;
-  }
-
-  /**
-   * Method will scan the block and find the range start index from which all
-   * members will be considered for applying range filters. This method will
-   * be called if the column is sorted in its default row order, so no column
-   * index mapping is needed for accessing the members from the block.
-   *
-   * @param dimensionColumnDataChunk
-   * @param numerOfRows
-   * @param defaultValue
-   * @return BitSet.
-   */
-  private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
-      int numerOfRows, byte[] defaultValue) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-      int start = 0;
-      int last = 0;
-      int startIndex = 0;
-      byte[][] filterValues = this.filterRangeValues;
-      int skip = 0;
-      // find how many leading default (null) values to skip in the direct dictionary case
-      if (null != defaultValue) {
-        start = CarbonUtil.getFirstIndexUsingBinarySearch(
-            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-            defaultValue, true);
-        if (start < 0) {
-          skip = -(start + 1);
-          // end of block
-          if (skip == numerOfRows) {
-            return bitSet;
-          }
-        } else {
-          skip = start;
-        }
-        startIndex = skip;
-      }
-      for (int k = 0; k < filterValues.length; k++) {
-        start = CarbonUtil.getFirstIndexUsingBinarySearch(
-            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-            filterValues[k], true);
-        if (start < 0) {
-          start = -(start + 1);
-          if (start == numerOfRows) {
-            start = start - 1;
-          }
-          // Compare the member at the tentative index (the binary-search insertion
-          // point) with the filter member; if the member exceeds the filter, step
-          // back so bits are set only up to the last member <= the filter value.
-          if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start))
-              <= 0) {
-            start = start - 1;
-          }
-        }
-        last = start;
-        for (int j = start; j >= skip; j--) {
-          bitSet.set(j);
-          last--;
-        }
-        startIndex = last;
-        if (startIndex <= 0) {
-          break;
-        }
-      }
-    }
-    return bitSet;
-  }
-
-}

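The skip computation above exists because, for direct-dictionary columns, the lowest surrogate encodes null and sorts first; the executor binary-searches the first non-null key (null surrogate + 1) and excludes the leading rows from the range. A sketch of that calculation, with ints standing in for the generated mask keys; all values here are illustrative:

import java.util.Arrays;

public class NullSkipSketch {
  public static void main(String[] args) {
    // indices 0..1 hold the null-default key (1); real members start at index 2
    int[] sortedKeys = {1, 1, 5, 7, 9};
    int firstNonNullKey = 2; // null surrogate + 1, as in the removed code
    int pos = Arrays.binarySearch(sortedKeys, firstNonNullKey);
    // on a miss the insertion point marks the first non-null row
    int skip = pos >= 0 ? pos : -(pos + 1);
    System.out.println("leading null rows to skip: " + skip); // 2
  }
}
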
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java b/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
deleted file mode 100644
index 00f927b..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import java.util.BitSet;
-import java.util.List;
-
-import org.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
-import org.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
-import org.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
-import org.carbondata.core.util.ByteUtil;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.scan.expression.Expression;
-import org.carbondata.scan.expression.exception.FilterUnsupportedException;
-import org.carbondata.scan.filter.FilterUtil;
-import org.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-import org.carbondata.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
-import org.carbondata.scan.processor.BlocksChunkHolder;
-
-public class RowLevelRangeLessThanFiterExecuterImpl extends RowLevelFilterExecuterImpl {
-  private byte[][] filterRangeValues;
-
-  public RowLevelRangeLessThanFiterExecuterImpl(
-      List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
-      List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
-      AbsoluteTableIdentifier tableIdentifier, byte[][] filterRangeValues,
-      SegmentProperties segmentProperties) {
-    super(dimColEvaluatorInfoList, msrColEvalutorInfoList, exp, tableIdentifier, segmentProperties,
-        null);
-    this.filterRangeValues = filterRangeValues;
-  }
-
-  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
-    BitSet bitSet = new BitSet(1);
-    byte[][] filterValues = this.filterRangeValues;
-    int columnIndex = this.dimColEvaluatorInfoList.get(0).getColumnIndex();
-    boolean isScanRequired = false;
-    for (int k = 0; k < filterValues.length; k++) {
-      // the block needs scanning when the filter value is greater than the
-      // block min, i.e. (filter - min) should be positive
-      int minCompare =
-          ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterValues[k], blockMinValue[columnIndex]);
-
-      // since this is a less-than filter, validate whether the block's min
-      // value is strictly less than the applied filter member; only then can
-      // any row in the block satisfy the filter
-      if (minCompare > 0) {
-        isScanRequired = true;
-        break;
-      }
-    }
-    if (isScanRequired) {
-      bitSet.set(0);
-    }
-    return bitSet;
-
-  }
-
-  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
-      throws FilterUnsupportedException {
-    if (!dimColEvaluatorInfoList.get(0).getDimension().hasEncoding(Encoding.DICTIONARY)) {
-      return super.applyFilter(blockChunkHolder);
-    }
-    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
-        .get(dimColEvaluatorInfoList.get(0).getColumnIndex());
-    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
-      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
-          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
-    }
-    return getFilteredIndexes(blockChunkHolder.getDimensionDataChunk()[blockIndex],
-        blockChunkHolder.getDataBlock().nodeSize());
-  }
-
-  private BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
-      int numerOfRows) {
-    byte[] defaultValue = null;
-    if (dimColEvaluatorInfoList.get(0).getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
-      DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
-          .getDirectDictionaryGenerator(
-              dimColEvaluatorInfoList.get(0).getDimension().getDataType());
-      int key = directDictionaryGenerator.generateDirectSurrogateKey(null) + 1;
-      defaultValue = FilterUtil.getMaskKey(key, dimColEvaluatorInfoList.get(0).getDimension(),
-          this.segmentProperties.getDimensionKeyGenerator());
-    }
-    if (null != dimensionColumnDataChunk.getAttributes().getInvertedIndexes()
-        && dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-      return setFilterdIndexToBitSetWithColumnIndex(
-          (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, numerOfRows, defaultValue);
-    }
-    return setFilterdIndexToBitSet(dimensionColumnDataChunk, numerOfRows, defaultValue);
-  }
-
-  /**
-   * Method will scan the block and find the range start index from which all members
-   * will be considered for applying range filters. This method will be called if the
-   * column is not sorted in its default row order, so a column index mapping
-   * (inverted index) will be present for accessing the members from the block.
-   *
-   * @param dimensionColumnDataChunk
-   * @param numerOfRows
-   * @return BitSet.
-   */
-  private BitSet setFilterdIndexToBitSetWithColumnIndex(
-      FixedLengthDimensionDataChunk dimensionColumnDataChunk, int numerOfRows,
-      byte[] defaultValue) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    int[] columnIndex = dimensionColumnDataChunk.getAttributes().getInvertedIndexes();
-    int start = 0;
-    int last = 0;
-    int startIndex = 0;
-    int skip = 0;
-    byte[][] filterValues = this.filterRangeValues;
-
-    // find how many leading default (null) values to skip in the direct dictionary case
-    if (null != defaultValue) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              defaultValue, false);
-      if (start < 0) {
-        skip = -(start + 1);
-        // end of block
-        if (skip == numerOfRows) {
-          return bitSet;
-        }
-      } else {
-        skip = start;
-      }
-      startIndex = skip;
-    }
-
-    for (int i = 0; i < filterValues.length; i++) {
-      start = CarbonUtil
-          .getFirstIndexUsingBinarySearch(dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-              filterValues[i], false);
-      // Handles the case where the range filter member is not present in the block:
-      // the binary search returns the index from which the bits will be set in order
-      // to apply the filter. This is a less-than filter, so the range ends at the
-      // previous element that is smaller than the filter member.
-      start = CarbonUtil.nextLesserValueToTarget(start, dimensionColumnDataChunk, filterValues[i]);
-      if (start < 0) {
-        start = -(start + 1);
-        if (start == numerOfRows) {
-          start = start - 1;
-        }
-        // Compare the member at the tentative index (the binary-search insertion
-        // point) with the filter member; if the member is greater than the filter,
-        // step back so bits are set only for members smaller than the filter value.
-        if (ByteUtil
-            .compare(filterValues[i], dimensionColumnDataChunk.getChunkData(columnIndex[start]))
-            < 0) {
-          start = start - 1;
-        }
-      }
-      last = start;
-      for (int j = start; j >= skip; j--) {
-        bitSet.set(columnIndex[j]);
-        last--;
-      }
-      startIndex = last;
-      if (startIndex <= 0) {
-        break;
-      }
-    }
-    return bitSet;
-  }
-
-  /**
-   * Method will scan the block and find the range start index from which all
-   * members will be considered for applying range filters. This method will
-   * be called if the column is sorted in its default row order, so no column
-   * index mapping is needed for accessing the members from the block.
-   *
-   * @param dimensionColumnDataChunk
-   * @param numerOfRows
-   * @return BitSet.
-   */
-  private BitSet setFilterdIndexToBitSet(DimensionColumnDataChunk dimensionColumnDataChunk,
-      int numerOfRows, byte[] defaultValue) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    if (dimensionColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
-      int start = 0;
-      int last = 0;
-      int startIndex = 0;
-      int skip = 0;
-      byte[][] filterValues = this.filterRangeValues;
-      // find how many leading default (null) values to skip in the direct dictionary case
-      if (null != defaultValue) {
-        start = CarbonUtil.getFirstIndexUsingBinarySearch(
-            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-            defaultValue, false);
-        if (start < 0) {
-          skip = -(start + 1);
-          // end of block
-          if (skip == numerOfRows) {
-            return bitSet;
-          }
-        } else {
-          skip = start;
-        }
-        startIndex = skip;
-      }
-      for (int k = 0; k < filterValues.length; k++) {
-        start = CarbonUtil.getFirstIndexUsingBinarySearch(
-            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, startIndex, numerOfRows - 1,
-            filterValues[k], false);
-        start = CarbonUtil.nextLesserValueToTarget(start,
-            (FixedLengthDimensionDataChunk) dimensionColumnDataChunk, filterValues[k]);
-        if (start < 0) {
-          start = -(start + 1);
-          if (start >= numerOfRows) {
-            start = numerOfRows - 1;
-          }
-          // Compare the member at the tentative index (the binary-search insertion
-          // point) with the filter member; if the member is greater than the filter,
-          // step back so bits are set only for members smaller than the filter value.
-          if (ByteUtil.compare(filterValues[k], dimensionColumnDataChunk.getChunkData(start)) < 0) {
-            start = start - 1;
-          }
-        }
-        last = start;
-        for (int j = start; j >= skip; j--) {
-          bitSet.set(j);
-          last--;
-        }
-        startIndex = last;
-        if (startIndex <= 0) {
-          break;
-        }
-      }
-    }
-    return bitSet;
-  }
-}

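In the ...WithColumnIndex variants above, the chunk is scanned in sorted order, but the resulting bits must identify original row ids; that is what the columnIndex (inverted index) lookup inside the set loop does. A small illustrative sketch; the index values are made up:

import java.util.BitSet;

public class InvertedIndexBitSetSketch {
  public static void main(String[] args) {
    // sorted position -> original row id
    int[] invertedIndex = {3, 0, 4, 1, 2};
    int start = 2;        // first sorted position that passes the filter
    int numberOfRows = 5;
    BitSet bitSet = new BitSet(numberOfRows);
    for (int j = start; j < numberOfRows; j++) {
      bitSet.set(invertedIndex[j]); // mark the original row, not the sorted slot
    }
    System.out.println(bitSet); // {1, 2, 4}
  }
}
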
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeTypeExecuterFacory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeTypeExecuterFacory.java b/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeTypeExecuterFacory.java
deleted file mode 100644
index e6eb6da..0000000
--- a/core/src/main/java/org/carbondata/scan/filter/executer/RowLevelRangeTypeExecuterFacory.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.scan.filter.executer;
-
-import org.carbondata.core.carbon.datastore.block.SegmentProperties;
-import org.carbondata.scan.filter.intf.FilterExecuterType;
-import org.carbondata.scan.filter.resolver.FilterResolverIntf;
-import org.carbondata.scan.filter.resolver.RowLevelRangeFilterResolverImpl;
-
-public class RowLevelRangeTypeExecuterFacory {
-
-  private RowLevelRangeTypeExecuterFacory() {
-
-  }
-
-  /**
-   * The method returns the row-level range filter executor instance based on
-   * the filter tree resolver type.
-   *
-   * @param filterExecuterType
-   * @param filterExpressionResolverTree
-   * @param segmentProperties
-   * @return the filter executor instance
-   */
-  public static RowLevelFilterExecuterImpl getRowLevelRangeTypeExecuter(
-      FilterExecuterType filterExecuterType, FilterResolverIntf filterExpressionResolverTree,
-      SegmentProperties segmentProperties) {
-    switch (filterExecuterType) {
-
-      case ROWLEVEL_LESSTHAN:
-        return new RowLevelRangeLessThanFiterExecuterImpl(
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getDimColEvaluatorInfoList(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getMsrColEvalutorInfoList(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getFilterExpression(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getFilterRangeValues(segmentProperties), segmentProperties);
-      case ROWLEVEL_LESSTHAN_EQUALTO:
-        return new RowLevelRangeLessThanEqualFilterExecuterImpl(
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getDimColEvaluatorInfoList(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getMsrColEvalutorInfoList(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getFilterExpression(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getFilterRangeValues(segmentProperties), segmentProperties);
-      case ROWLEVEL_GREATERTHAN_EQUALTO:
-        return new RowLevelRangeGrtrThanEquaToFilterExecuterImpl(
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getDimColEvaluatorInfoList(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getMsrColEvalutorInfoList(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getFilterExpression(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getFilterRangeValues(segmentProperties), segmentProperties);
-      case ROWLEVEL_GREATERTHAN:
-        return new RowLevelRangeGrtThanFiterExecuterImpl(
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getDimColEvaluatorInfoList(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getMsrColEvalutorInfoList(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getFilterExpression(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
-            ((RowLevelRangeFilterResolverImpl) filterExpressionResolverTree)
-                .getFilterRangeValues(segmentProperties), segmentProperties);
-      default:
-        // this scenario should not occur; every range filter type is handled above
-        return null;
-
-    }
-  }
-
-}

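The removed factory is a plain switch over FilterExecuterType, one case per range comparison. The sketch below reproduces only that dispatch shape; the enum, interface, and lambdas are stand-ins rather than the CarbonData types:

public class RangeExecuterFactorySketch {
  enum RangeType { LESSTHAN, LESSTHAN_EQUALTO, GREATERTHAN, GREATERTHAN_EQUALTO }

  // stand-in for the executor contract: does a value pass the range bound?
  interface Executer { boolean accept(int value, int bound); }

  static Executer forType(RangeType type) {
    switch (type) {
      case LESSTHAN:            return (v, b) -> v < b;
      case LESSTHAN_EQUALTO:    return (v, b) -> v <= b;
      case GREATERTHAN:         return (v, b) -> v > b;
      case GREATERTHAN_EQUALTO: return (v, b) -> v >= b;
      default: throw new IllegalStateException("unreachable: " + type);
    }
  }

  public static void main(String[] args) {
    System.out.println(forType(RangeType.GREATERTHAN).accept(7, 5)); // true
  }
}
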


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/FilterUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/FilterUtil.java b/core/src/main/java/org/apache/carbondata/scan/filter/FilterUtil.java
new file mode 100644
index 0000000..a96b0b2
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/FilterUtil.java
@@ -0,0 +1,1395 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.filter;
+
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.text.SimpleDateFormat;
+import java.util.*;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.cache.Cache;
+import org.apache.carbondata.core.cache.CacheProvider;
+import org.apache.carbondata.core.cache.CacheType;
+import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.cache.dictionary.DictionaryChunksWrapper;
+import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
+import org.apache.carbondata.core.cache.dictionary.ForwardDictionary;
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.carbon.datastore.IndexKey;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+import org.apache.carbondata.core.keygenerator.KeyGenerator;
+import org.apache.carbondata.core.util.ByteUtil;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.CarbonUtilException;
+import org.apache.carbondata.core.util.DataTypeUtil;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.expression.ColumnExpression;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.LiteralExpression;
+import org.apache.carbondata.scan.expression.UnknownExpression;
+import org.apache.carbondata.scan.expression.conditional.ListExpression;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.executer.*;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.FilterExecuterType;
+import org.apache.carbondata.scan.filter.intf.RowImpl;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+import org.apache.carbondata.scan.filter.resolver.FilterResolverIntf;
+import org.apache.carbondata.scan.filter.resolver.RowLevelFilterResolverImpl;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+
+public final class FilterUtil {
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(FilterUtil.class.getName());
+
+  private FilterUtil() {
+
+  }
+
+  /**
+   * Pattern used : Visitor Pattern
+   * Method will create the filter executer tree based on the resolved filter tree;
+   * in this algorithm the executers are visited based on the resolver instance,
+   * and the resolved surrogates are converted to keys.
+   *
+   * @param filterExpressionResolverTree
+   * @param segmentProperties
+   * @return FilterExecuter instance
+   */
+  private static FilterExecuter createFilterExecuterTree(
+      FilterResolverIntf filterExpressionResolverTree, SegmentProperties segmentProperties,
+      Map<Integer, GenericQueryType> complexDimensionInfoMap) {
+    FilterExecuterType filterExecuterType = filterExpressionResolverTree.getFilterExecuterType();
+    if (null != filterExecuterType) {
+      switch (filterExecuterType) {
+        case INCLUDE:
+          return getIncludeFilterExecuter(
+              filterExpressionResolverTree.getDimColResolvedFilterInfo(), segmentProperties);
+        case EXCLUDE:
+          return getExcludeFilterExecuter(
+              filterExpressionResolverTree.getDimColResolvedFilterInfo(), segmentProperties);
+        case OR:
+          return new OrFilterExecuterImpl(
+              createFilterExecuterTree(filterExpressionResolverTree.getLeft(), segmentProperties,
+                  complexDimensionInfoMap),
+              createFilterExecuterTree(filterExpressionResolverTree.getRight(), segmentProperties,
+                  complexDimensionInfoMap));
+        case AND:
+          return new AndFilterExecuterImpl(
+              createFilterExecuterTree(filterExpressionResolverTree.getLeft(), segmentProperties,
+                  complexDimensionInfoMap),
+              createFilterExecuterTree(filterExpressionResolverTree.getRight(), segmentProperties,
+                  complexDimensionInfoMap));
+        case RESTRUCTURE:
+          return new RestructureFilterExecuterImpl(
+              filterExpressionResolverTree.getDimColResolvedFilterInfo(),
+              segmentProperties.getDimensionKeyGenerator());
+        case ROWLEVEL_LESSTHAN:
+        case ROWLEVEL_LESSTHAN_EQUALTO:
+        case ROWLEVEL_GREATERTHAN_EQUALTO:
+        case ROWLEVEL_GREATERTHAN:
+          return RowLevelRangeTypeExecuterFacory
+              .getRowLevelRangeTypeExecuter(filterExecuterType, filterExpressionResolverTree,
+                  segmentProperties);
+        case ROWLEVEL:
+        default:
+          return new RowLevelFilterExecuterImpl(
+              ((RowLevelFilterResolverImpl) filterExpressionResolverTree)
+                  .getDimColEvaluatorInfoList(),
+              ((RowLevelFilterResolverImpl) filterExpressionResolverTree)
+                  .getMsrColEvalutorInfoList(),
+              ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getFilterExpresion(),
+              ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
+              segmentProperties, complexDimensionInfoMap);
+
+      }
+    }
+    return new RowLevelFilterExecuterImpl(
+        ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getDimColEvaluatorInfoList(),
+        ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getMsrColEvalutorInfoList(),
+        ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getFilterExpresion(),
+        ((RowLevelFilterResolverImpl) filterExpressionResolverTree).getTableIdentifier(),
+        segmentProperties, complexDimensionInfoMap);
+
+  }
+
+  /**
+   * It gives the include filter executer based on whether the dimension is
+   * columnar or part of a column group.
+   *
+   * @param dimColResolvedFilterInfo
+   * @param segmentProperties
+   * @return FilterExecuter instance
+   */
+  private static FilterExecuter getIncludeFilterExecuter(
+      DimColumnResolvedFilterInfo dimColResolvedFilterInfo, SegmentProperties segmentProperties) {
+
+    if (dimColResolvedFilterInfo.getDimension().isColumnar()) {
+      return new IncludeFilterExecuterImpl(dimColResolvedFilterInfo, segmentProperties);
+    } else {
+      return new IncludeColGroupFilterExecuterImpl(dimColResolvedFilterInfo, segmentProperties);
+    }
+  }
+
+  /**
+   * It gives the exclude filter executer based on whether the dimension is
+   * columnar or part of a column group.
+   *
+   * @param dimColResolvedFilterInfo
+   * @param segmentProperties
+   * @return FilterExecuter instance
+   */
+  private static FilterExecuter getExcludeFilterExecuter(
+      DimColumnResolvedFilterInfo dimColResolvedFilterInfo, SegmentProperties segmentProperties) {
+
+    if (dimColResolvedFilterInfo.getDimension().isColumnar()) {
+      return new ExcludeFilterExecuterImpl(dimColResolvedFilterInfo, segmentProperties);
+    } else {
+      return new ExcludeColGroupFilterExecuterImpl(dimColResolvedFilterInfo, segmentProperties);
+    }
+  }
+
+  /**
+   * This method will check if a given expression contains a column expression
+   * recursively.
+   *
+   * @return
+   */
+  public static boolean checkIfExpressionContainsColumn(Expression expression) {
+    if (expression instanceof ColumnExpression) {
+      return true;
+    }
+    for (Expression child : expression.getChildren()) {
+      if (checkIfExpressionContainsColumn(child)) {
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+  /**
+   * This method will recursively check whether the left expression requires
+   * evaluation, i.e. whether it is an unknown expression or not a plain
+   * column expression.
+   *
+   * @return
+   */
+  public static boolean checkIfLeftExpressionRequireEvaluation(Expression expression) {
+    if (expression.getFilterExpressionType() == ExpressionType.UNKNOWN
+        || !(expression instanceof ColumnExpression)) {
+      return true;
+    }
+    for (Expression child : expression.getChildren()) {
+      if (checkIfLeftExpressionRequireEvaluation(child)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * This method will recursively check whether any literal expression in the
+   * given expression tree is of a non-timestamp data type.
+   *
+   * @return
+   */
+  public static boolean checkIfDataTypeNotTimeStamp(Expression expression) {
+    if (expression.getFilterExpressionType() == ExpressionType.LITERAL) {
+      if (((LiteralExpression) expression).getLiteralExpDataType() != DataType.TIMESTAMP) {
+        return true;
+      }
+    }
+    for (Expression child : expression.getChildren()) {
+      if (checkIfDataTypeNotTimeStamp(child)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * This method will recursively check whether the right expression requires
+   * evaluation, i.e. whether it is an unknown expression or neither a literal
+   * nor a list expression.
+   *
+   * @return
+   */
+  public static boolean checkIfRightExpressionRequireEvaluation(Expression expression) {
+    if (expression.getFilterExpressionType() == ExpressionType.UNKNOWN
+        || !(expression instanceof LiteralExpression) && !(expression instanceof ListExpression)) {
+      return true;
+    }
+    for (Expression child : expression.getChildren()) {
+      if (checkIfRightExpressionRequireEvaluation(child)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * method will get the masked keys based on the keys generated from surrogates.
+   *
+   * @param ranges
+   * @param key
+   * @return byte[]
+   */
+  private static byte[] getMaskedKey(int[] ranges, byte[] key) {
+    byte[] maskkey = new byte[ranges.length];
+
+    for (int i = 0; i < maskkey.length; i++) {
+      maskkey[i] = key[ranges[i]];
+    }
+    return maskkey;
+  }
+
+  /**
+   * This method will return the ranges for the masked Bytes based on the key
+   * Generator.
+   *
+   * @param queryDimensionsOrdinal
+   * @param generator
+   * @return
+   */
+  private static int[] getRangesForMaskedByte(int queryDimensionsOrdinal, KeyGenerator generator) {
+    Set<Integer> integers = new TreeSet<Integer>();
+    int[] range = generator.getKeyByteOffsets(queryDimensionsOrdinal);
+    for (int j = range[0]; j <= range[1]; j++) {
+      integers.add(j);
+    }
+
+    int[] byteIndexes = new int[integers.size()];
+    int j = 0;
+    for (Integer integer : integers) {
+      byteIndexes[j++] = integer.intValue();
+    }
+    return byteIndexes;
+  }
+
+  /**
+   * This method will get the no dictionary data based on the filter values and
+   * wrap the same in a DimColumnFilterInfo instance.
+   *
+   * @param tableIdentifier
+   * @param columnExpression
+   * @param evaluateResultListFinal
+   * @param isIncludeFilter
+   * @return DimColumnFilterInfo
+   */
+  public static DimColumnFilterInfo getNoDictionaryValKeyMemberForFilter(
+      AbsoluteTableIdentifier tableIdentifier, ColumnExpression columnExpression,
+      List<String> evaluateResultListFinal, boolean isIncludeFilter) {
+    List<byte[]> filterValuesList = new ArrayList<byte[]>(20);
+    for (String result : evaluateResultListFinal) {
+      filterValuesList.add(result.getBytes(Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET)));
+    }
+
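+    // keep the no dictionary filter values in unsigned byte order; the filter
+    // executers rely on this ordering when matching the keys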
+    Comparator<byte[]> filterNoDictValueComparator = new Comparator<byte[]>() {
+
+      @Override public int compare(byte[] filterMember1, byte[] filterMember2) {
+        return ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterMember1, filterMember2);
+      }
+
+    };
+    Collections.sort(filterValuesList, filterNoDictValueComparator);
+    DimColumnFilterInfo columnFilterInfo = null;
+    if (filterValuesList.size() > 0) {
+      columnFilterInfo = new DimColumnFilterInfo();
+      columnFilterInfo.setIncludeFilter(isIncludeFilter);
+      columnFilterInfo.setFilterListForNoDictionaryCols(filterValuesList);
+
+    }
+    return columnFilterInfo;
+  }
+
+  /**
+   * Method will prepare the DimColumnFilterInfo instance by resolving the filter
+   * expression values to their respective surrogates.
+   *
+   * @param tableIdentifier
+   * @param columnExpression
+   * @param evaluateResultList
+   * @param isIncludeFilter
+   * @return
+   * @throws QueryExecutionException
+   */
+  public static DimColumnFilterInfo getFilterValues(AbsoluteTableIdentifier tableIdentifier,
+      ColumnExpression columnExpression, List<String> evaluateResultList, boolean isIncludeFilter)
+      throws QueryExecutionException {
+    Dictionary forwardDictionary = null;
+    try {
+      // Reading the dictionary value from cache.
+      forwardDictionary =
+          getForwardDictionaryCache(tableIdentifier, columnExpression.getDimension());
+      return getFilterValues(columnExpression, evaluateResultList, forwardDictionary,
+          isIncludeFilter);
+    } finally {
+      CarbonUtil.clearDictionaryCache(forwardDictionary);
+    }
+  }
+
+  /**
+   * Method will prepare the DimColumnFilterInfo instance by resolving the filter
+   * expression values to their respective surrogates.
+   *
+   * @param columnExpression
+   * @param evaluateResultList
+   * @param forwardDictionary
+   * @param isIncludeFilter
+   * @return
+   * @throws QueryExecutionException
+   */
+  private static DimColumnFilterInfo getFilterValues(ColumnExpression columnExpression,
+      List<String> evaluateResultList, Dictionary forwardDictionary, boolean isIncludeFilter)
+      throws QueryExecutionException {
+    sortFilterModelMembers(columnExpression, evaluateResultList);
+    List<Integer> surrogates =
+        new ArrayList<Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    // Reading the dictionary value from cache.
+    getDictionaryValue(evaluateResultList, forwardDictionary, surrogates);
+    Collections.sort(surrogates);
+    DimColumnFilterInfo columnFilterInfo = null;
+    if (surrogates.size() > 0) {
+      columnFilterInfo = new DimColumnFilterInfo();
+      columnFilterInfo.setIncludeFilter(isIncludeFilter);
+      columnFilterInfo.setFilterList(surrogates);
+    }
+    return columnFilterInfo;
+  }
+
+  /**
+   * This API will get the Dictionary value for the respective filter member
+   * string.
+   *
+   * @param evaluateResultList filter value
+   * @param surrogates
+   * @throws QueryExecutionException
+   */
+  private static void getDictionaryValue(List<String> evaluateResultList,
+      Dictionary forwardDictionary, List<Integer> surrogates) throws QueryExecutionException {
+    ((ForwardDictionary) forwardDictionary)
+        .getSurrogateKeyByIncrementalSearch(evaluateResultList, surrogates);
+  }
+
+  /**
+   * This method will get all the members of a column from the forward dictionary
+   * cache; it is mainly used by the row level filter resolver.
+   *
+   * @param tableIdentifier
+   * @param expression
+   * @param columnExpression
+   * @param isIncludeFilter
+   * @return DimColumnFilterInfo
+   * @throws FilterUnsupportedException
+   * @throws QueryExecutionException
+   */
+  public static DimColumnFilterInfo getFilterListForAllValues(
+      AbsoluteTableIdentifier tableIdentifier, Expression expression,
+      final ColumnExpression columnExpression, boolean isIncludeFilter)
+      throws FilterUnsupportedException {
+    Dictionary forwardDictionary = null;
+    List<String> evaluateResultListFinal = new ArrayList<String>(20);
+    DictionaryChunksWrapper dictionaryWrapper = null;
+    try {
+      forwardDictionary =
+          getForwardDictionaryCache(tableIdentifier, columnExpression.getDimension());
+      dictionaryWrapper = forwardDictionary.getDictionaryChunks();
+      while (dictionaryWrapper.hasNext()) {
+        byte[] columnVal = dictionaryWrapper.next();
+        try {
+          RowIntf row = new RowImpl();
+          String stringValue =
+              new String(columnVal, Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
+          if (stringValue.equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
+            stringValue = null;
+          }
+          row.setValues(new Object[] { DataTypeUtil.getDataBasedOnDataType(stringValue,
+              columnExpression.getCarbonColumn().getDataType()) });
+          Boolean rslt = expression.evaluate(row).getBoolean();
+          if (null != rslt && !(rslt ^ isIncludeFilter)) {
+            if (null == stringValue) {
+              evaluateResultListFinal.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL);
+            } else {
+              evaluateResultListFinal.add(stringValue);
+            }
+          }
+        } catch (FilterIllegalMemberException e) {
+          LOGGER.debug(e.getMessage());
+        }
+      }
+      return getFilterValues(columnExpression, evaluateResultListFinal, forwardDictionary,
+          isIncludeFilter);
+    } catch (QueryExecutionException e) {
+      throw new FilterUnsupportedException(e.getMessage());
+    } finally {
+      CarbonUtil.clearDictionaryCache(forwardDictionary);
+    }
+  }
+
+  private static void sortFilterModelMembers(final ColumnExpression columnExpression,
+      List<String> evaluateResultListFinal) {
+    Comparator<String> filterActualValueComaparator = new Comparator<String>() {
+
+      @Override public int compare(String filterMember1, String filterMember2) {
+        return compareFilterMembersBasedOnActualDataType(filterMember1, filterMember2,
+            columnExpression.getDataType());
+      }
+
+    };
+    Collections.sort(evaluateResultListFinal, filterActualValueComaparator);
+  }
+
+  /**
+   * Method will resolve the filter members to their respective surrogates by
+   * scanning the dictionary cache.
+   *
+   * @param tableIdentifier
+   * @param expression
+   * @param columnExpression
+   * @param isIncludeFilter
+   * @return
+   * @throws QueryExecutionException
+   * @throws FilterUnsupportedException
+   */
+  public static DimColumnFilterInfo getFilterList(AbsoluteTableIdentifier tableIdentifier,
+      Expression expression, ColumnExpression columnExpression, boolean isIncludeFilter)
+      throws QueryExecutionException, FilterUnsupportedException {
+    DimColumnFilterInfo resolvedFilterObject = null;
+    List<String> evaluateResultListFinal = new ArrayList<String>(20);
+    try {
+      List<ExpressionResult> evaluateResultList = expression.evaluate(null).getList();
+      for (ExpressionResult result : evaluateResultList) {
+        if (result.getString() == null) {
+          evaluateResultListFinal.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL);
+          continue;
+        }
+        evaluateResultListFinal.add(result.getString());
+      }
+
+      if (null != columnExpression.getCarbonColumn() && !columnExpression.getCarbonColumn()
+          .hasEncoding(Encoding.DICTIONARY)) {
+        resolvedFilterObject =
+            getNoDictionaryValKeyMemberForFilter(tableIdentifier, columnExpression,
+                evaluateResultListFinal, isIncludeFilter);
+      } else {
+        resolvedFilterObject =
+            getFilterValues(tableIdentifier, columnExpression, evaluateResultListFinal,
+                isIncludeFilter);
+      }
+    } catch (FilterIllegalMemberException e) {
+      LOGGER.audit(e.getMessage());
+    }
+    return resolvedFilterObject;
+  }
+
+  /**
+   * Method will prepare the DimColumnFilterInfo instance by resolving the filter
+   * expression values to their respective surrogates in the restructure scenario.
+   *
+   * @param expression
+   * @param columnExpression
+   * @param defaultValues
+   * @param defaultSurrogate
+   * @return
+   * @throws FilterUnsupportedException
+   */
+  public static DimColumnFilterInfo getFilterListForRS(Expression expression,
+      ColumnExpression columnExpression, String defaultValues, int defaultSurrogate)
+      throws FilterUnsupportedException {
+    List<Integer> filterValuesList = new ArrayList<Integer>(20);
+    DimColumnFilterInfo columnFilterInfo = null;
+    List<String> evaluateResultListFinal = new ArrayList<String>(20);
+    try {
+      List<ExpressionResult> evaluateResultList = expression.evaluate(null).getList();
+      for (ExpressionResult result : evaluateResultList) {
+        if (result.getString() == null) {
+          evaluateResultListFinal.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL);
+          continue;
+        }
+        evaluateResultListFinal.add(result.getString());
+      }
+
+      for (int i = 0; i < evaluateResultListFinal.size(); i++) {
+        if (evaluateResultListFinal.get(i).equals(defaultValues)) {
+          filterValuesList.add(defaultSurrogate);
+          break;
+        }
+      }
+      if (filterValuesList.size() > 0) {
+        columnFilterInfo = new DimColumnFilterInfo();
+        columnFilterInfo.setFilterList(filterValuesList);
+      }
+    } catch (FilterIllegalMemberException e) {
+      LOGGER.audit(e.getMessage());
+    }
+    return columnFilterInfo;
+  }
+
+  /**
+   * This method will get the members based on the filter expression evaluation
+   * from the forward dictionary cache; it is mainly used during restructure.
+   *
+   * @param expression
+   * @param columnExpression
+   * @param defaultValues
+   * @param defaultSurrogate
+   * @param isIncludeFilter
+   * @return
+   * @throws FilterUnsupportedException
+   */
+  public static DimColumnFilterInfo getFilterListForAllMembersRS(Expression expression,
+      ColumnExpression columnExpression, String defaultValues, int defaultSurrogate,
+      boolean isIncludeFilter) throws FilterUnsupportedException {
+    List<Integer> filterValuesList = new ArrayList<Integer>(20);
+    List<String> evaluateResultListFinal = new ArrayList<String>(20);
+    DimColumnFilterInfo columnFilterInfo = null;
+
+    try {
+      RowIntf row = new RowImpl();
+      if (defaultValues.equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL)) {
+        defaultValues = null;
+      }
+      row.setValues(new Object[] { DataTypeUtil.getDataBasedOnDataType(defaultValues,
+          columnExpression.getCarbonColumn().getDataType()) });
+      Boolean rslt = expression.evaluate(row).getBoolean();
+      if (null != rslt && !(rslt ^ isIncludeFilter)) {
+        if (null == defaultValues) {
+          evaluateResultListFinal.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL);
+        } else {
+          evaluateResultListFinal.add(defaultValues);
+        }
+      }
+    } catch (FilterIllegalMemberException e) {
+      LOGGER.audit(e.getMessage());
+    }
+
+    if (null == defaultValues) {
+      defaultValues = CarbonCommonConstants.MEMBER_DEFAULT_VAL;
+    }
+    columnFilterInfo = new DimColumnFilterInfo();
+    for (int i = 0; i < evaluateResultListFinal.size(); i++) {
+      if (evaluateResultListFinal.get(i).equals(defaultValues)) {
+        filterValuesList.add(defaultSurrogate);
+        break;
+      }
+    }
+    columnFilterInfo.setFilterList(filterValuesList);
+    return columnFilterInfo;
+  }
+
+  public static byte[][] getKeyArray(DimColumnFilterInfo dimColumnFilterInfo,
+      CarbonDimension carbonDimension, KeyGenerator blockLevelKeyGenerator) {
+    if (!carbonDimension.hasEncoding(Encoding.DICTIONARY)) {
+      return dimColumnFilterInfo.getNoDictionaryFilterValuesList()
+          .toArray((new byte[dimColumnFilterInfo.getNoDictionaryFilterValuesList().size()][]));
+    }
+    int[] keys = new int[blockLevelKeyGenerator.getDimCount()];
+    List<byte[]> filterValuesList = new ArrayList<byte[]>(20);
+    Arrays.fill(keys, 0);
+    int[] rangesForMaskedByte =
+        getRangesForMaskedByte((carbonDimension.getKeyOrdinal()), blockLevelKeyGenerator);
+    if (null != dimColumnFilterInfo) {
+      for (Integer surrogate : dimColumnFilterInfo.getFilterList()) {
+        try {
+          keys[carbonDimension.getKeyOrdinal()] = surrogate;
+          filterValuesList
+              .add(getMaskedKey(rangesForMaskedByte, blockLevelKeyGenerator.generateKey(keys)));
+        } catch (KeyGenException e) {
+          LOGGER.error(e.getMessage());
+        }
+      }
+
+    }
+    return filterValuesList.toArray(new byte[filterValuesList.size()][]);
+
+  }
+
+  /**
+   * The method is used to get the masked key for a single dictionary
+   * surrogate key.
+   *
+   * @param surrogate
+   * @param carbonDimension
+   * @param blockLevelKeyGenerator
+   * @return
+   */
+  public static byte[] getMaskKey(int surrogate, CarbonDimension carbonDimension,
+      KeyGenerator blockLevelKeyGenerator) {
+
+    int[] keys = new int[blockLevelKeyGenerator.getDimCount()];
+    byte[] maskedKey = null;
+    Arrays.fill(keys, 0);
+    int[] rangesForMaskedByte =
+        getRangesForMaskedByte((carbonDimension.getKeyOrdinal()), blockLevelKeyGenerator);
+    try {
+      keys[carbonDimension.getKeyOrdinal()] = surrogate;
+      maskedKey = getMaskedKey(rangesForMaskedByte, blockLevelKeyGenerator.generateKey(keys));
+    } catch (KeyGenException e) {
+      LOGGER.error(e.getMessage());
+    }
+    return maskedKey;
+  }
+
+  /**
+   * Method will initialize the start key for each column and then refine it
+   * based on the respective filter resolved instances.
+   *
+   * @param dimensionFilter
+   * @param startKey
+   * @param startKeyList
+   */
+  public static void getStartKey(Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionFilter,
+      long[] startKey, List<long[]> startKeyList) throws QueryExecutionException {
+    for (int i = 0; i < startKey.length; i++) {
+      // the min surrogate key is 1; set it as the initial start key of each column level
+      startKey[i] = 1;
+    }
+    getStartKeyWithFilter(dimensionFilter, startKey, startKeyList);
+  }
+
+  /**
+   * Algorithm for getting the start key for a filter
+   * step 1: Iterate through each dimension and verify that it is not an exclude filter.
+   * step 2: Initialize the start key with the first filter member value present in each
+   * filter model for the respective dimensions.
+   * step 3: Since it is a no dictionary start key, there are only actual values, so compare
+   * the first filter model value with respect to the dimension data type.
+   * step 4: The least value, found by comparing all the filter models of a dimension,
+   * is considered the start key of that dimension.
+   * step 5: Create a byte array of the start key which comprises the least filter member
+   * value of all dimensions and the indexes which help to read the respective filter values.
+   *
+   * @param dimColResolvedFilterInfo
+   * @param setOfStartKeyByteArray
+   */
+  public static void getStartKeyForNoDictionaryDimension(
+      DimColumnResolvedFilterInfo dimColResolvedFilterInfo,
+      SortedMap<Integer, byte[]> setOfStartKeyByteArray) {
+    Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionFilter =
+        dimColResolvedFilterInfo.getDimensionResolvedFilterInstance();
+    // step 1
+    for (Map.Entry<CarbonDimension, List<DimColumnFilterInfo>> entry : dimensionFilter.entrySet()) {
+      if (!entry.getKey().hasEncoding(Encoding.DICTIONARY)) {
+        List<DimColumnFilterInfo> listOfDimColFilterInfo = entry.getValue();
+        if (null == listOfDimColFilterInfo) {
+          continue;
+        }
+        boolean isExcludePresent = false;
+        for (DimColumnFilterInfo info : listOfDimColFilterInfo) {
+          if (!info.isIncludeFilter()) {
+            isExcludePresent = true;
+          }
+        }
+        if (isExcludePresent) {
+          continue;
+        }
+        // step 2
+        byte[] noDictionaryStartKey =
+            listOfDimColFilterInfo.get(0).getNoDictionaryFilterValuesList().get(0);
+        if (setOfStartKeyByteArray.isEmpty()) {
+          setOfStartKeyByteArray.put(entry.getKey().getOrdinal(), noDictionaryStartKey);
+        } else if (null == setOfStartKeyByteArray.get(entry.getKey().getOrdinal())) {
+          setOfStartKeyByteArray.put(entry.getKey().getOrdinal(), noDictionaryStartKey);
+
+        } else if (ByteUtil.UnsafeComparer.INSTANCE
+            .compareTo(setOfStartKeyByteArray.get(entry.getKey().getOrdinal()),
+                noDictionaryStartKey) > 0) {
+          setOfStartKeyByteArray.put(entry.getKey().getOrdinal(), noDictionaryStartKey);
+        }
+      }
+    }
+  }
+
+  /**
+   * Algorithm for getting the end key for a filter
+   * step 1: Iterate through each dimension and verify that it is not an exclude filter.
+   * step 2: Initialize the end key with the last filter member value present in each
+   * filter model for the respective dimensions (the filter models are already sorted).
+   * step 3: Since it is a no dictionary end key, there are only actual values, so compare
+   * the last filter model value with respect to the dimension data type.
+   * step 4: The highest value, found by comparing all the filter models of a dimension,
+   * is considered the end key of that dimension.
+   * step 5: Create a byte array of the end key which comprises the highest filter member
+   * value of all dimensions and the indexes which help to read the respective filter values.
+   *
+   * @param dimColResolvedFilterInfo
+   * @param setOfEndKeyByteArray
+   */
+  public static void getEndKeyForNoDictionaryDimension(
+      DimColumnResolvedFilterInfo dimColResolvedFilterInfo,
+      SortedMap<Integer, byte[]> setOfEndKeyByteArray) {
+
+    Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionFilter =
+        dimColResolvedFilterInfo.getDimensionResolvedFilterInstance();
+    // step 1
+    for (Map.Entry<CarbonDimension, List<DimColumnFilterInfo>> entry : dimensionFilter.entrySet()) {
+      if (!entry.getKey().hasEncoding(Encoding.DICTIONARY)) {
+        List<DimColumnFilterInfo> listOfDimColFilterInfo = entry.getValue();
+        if (null == listOfDimColFilterInfo) {
+          continue;
+        }
+        boolean isExcludePresent = false;
+        for (DimColumnFilterInfo info : listOfDimColFilterInfo) {
+          if (!info.isIncludeFilter()) {
+            isExcludePresent = true;
+          }
+        }
+        if (isExcludePresent) {
+          continue;
+        }
+        // step 2
+        byte[] noDictionaryEndKey = listOfDimColFilterInfo.get(0).getNoDictionaryFilterValuesList()
+            .get(listOfDimColFilterInfo.get(0).getNoDictionaryFilterValuesList().size() - 1);
+        if (setOfEndKeyByteArray.isEmpty()) {
+          setOfEndKeyByteArray.put(entry.getKey().getOrdinal(), noDictionaryEndKey);
+        } else if (null == setOfEndKeyByteArray.get(entry.getKey().getOrdinal())) {
+          setOfEndKeyByteArray.put(entry.getKey().getOrdinal(), noDictionaryEndKey);
+
+        } else if (ByteUtil.UnsafeComparer.INSTANCE
+            .compareTo(setOfEndKeyByteArray.get(entry.getKey().getOrdinal()), noDictionaryEndKey)
+            < 0) {
+          setOfEndKeyByteArray.put(entry.getKey().getOrdinal(), noDictionaryEndKey);
+        }
+
+      }
+    }
+  }
+
+  /**
+   * Method will pack all the byte[] values into a single byte[] by prepending the
+   * indexes of the byte[] values which need to be read; this method is mainly used
+   * in no dictionary dimension processing for filters.
+   *
+   * @param noDictionaryValKeyList
+   * @return packed key with its indexes added at the start, followed by the actual values.
+   */
+  private static byte[] getKeyWithIndexesAndValues(List<byte[]> noDictionaryValKeyList) {
+    ByteBuffer[] buffArr = new ByteBuffer[noDictionaryValKeyList.size()];
+    int index = 0;
+    for (byte[] singleColVal : noDictionaryValKeyList) {
+      buffArr[index] = ByteBuffer.allocate(singleColVal.length);
+      buffArr[index].put(singleColVal);
+      buffArr[index++].rewind();
+    }
+    // pack all value buffers, prefixed with their 2-byte offsets, into one byte array
+    return CarbonUtil.packByteBufferIntoSingleByteArray(buffArr);
+
+  }
+
+  /**
+   * This method will fill the start key array with the surrogate keys present
+   * in the filter info instance.
+   *
+   * @param dimensionFilter
+   * @param startKey
+   */
+  private static void getStartKeyWithFilter(
+      Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionFilter, long[] startKey,
+      List<long[]> startKeyList) {
+    for (Map.Entry<CarbonDimension, List<DimColumnFilterInfo>> entry : dimensionFilter.entrySet()) {
+      List<DimColumnFilterInfo> values = entry.getValue();
+      if (null == values || !entry.getKey().hasEncoding(Encoding.DICTIONARY)) {
+        continue;
+      }
+      boolean isExcludePresent = false;
+      for (DimColumnFilterInfo info : values) {
+        if (!info.isIncludeFilter()) {
+          isExcludePresent = true;
+        }
+      }
+      if (isExcludePresent) {
+        continue;
+      }
+      for (DimColumnFilterInfo info : values) {
+        if (startKey[entry.getKey().getKeyOrdinal()] < info.getFilterList().get(0)) {
+          startKey[entry.getKey().getKeyOrdinal()] = info.getFilterList().get(0);
+        }
+      }
+      long[] newStartKey = new long[startKey.length];
+      System.arraycopy(startKey, 0, newStartKey, 0, startKey.length);
+      startKeyList.add(newStartKey);
+    }
+  }
+
+  public static void getEndKey(Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionFilter,
+      AbsoluteTableIdentifier tableIdentifier, long[] endKey, SegmentProperties segmentProperties,
+      List<long[]> endKeyList) throws QueryExecutionException {
+
+    List<CarbonDimension> updatedDimListBasedOnKeyGenerator =
+        getCarbonDimsMappedToKeyGenerator(segmentProperties.getDimensions());
+    for (int i = 0; i < endKey.length; i++) {
+      endKey[i] = getMaxValue(tableIdentifier, updatedDimListBasedOnKeyGenerator.get(i),
+          segmentProperties.getDimColumnsCardinality());
+    }
+    getEndKeyWithFilter(dimensionFilter, endKey, endKeyList);
+
+  }
+
+  private static List<CarbonDimension> getCarbonDimsMappedToKeyGenerator(
+      List<CarbonDimension> carbonDimensions) {
+    List<CarbonDimension> listOfCarbonDimPartOfKeyGen =
+        new ArrayList<CarbonDimension>(carbonDimensions.size());
+    for (CarbonDimension carbonDim : carbonDimensions) {
+      if (CarbonUtil.hasEncoding(carbonDim.getEncoder(), Encoding.DICTIONARY) || CarbonUtil
+          .hasEncoding(carbonDim.getEncoder(), Encoding.DIRECT_DICTIONARY)) {
+        listOfCarbonDimPartOfKeyGen.add(carbonDim);
+      }
+
+    }
+    return listOfCarbonDimPartOfKeyGen;
+  }
+
+  private static void getEndKeyWithFilter(
+      Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionFilter, long[] endKey,
+      List<long[]> endKeyList) {
+    for (Map.Entry<CarbonDimension, List<DimColumnFilterInfo>> entry : dimensionFilter.entrySet()) {
+      List<DimColumnFilterInfo> values = entry.getValue();
+      if (null == values || !entry.getKey().hasEncoding(Encoding.DICTIONARY)) {
+        continue;
+      }
+      boolean isExcludeFilterPresent = false;
+      for (DimColumnFilterInfo info : values) {
+        if (!info.isIncludeFilter()) {
+          isExcludeFilterPresent = true;
+        }
+      }
+      if (isExcludeFilterPresent) {
+        continue;
+      }
+
+      for (DimColumnFilterInfo info : values) {
+        if (endKey[entry.getKey().getKeyOrdinal()] > info.getFilterList()
+            .get(info.getFilterList().size() - 1)) {
+          endKey[entry.getKey().getKeyOrdinal()] =
+              info.getFilterList().get(info.getFilterList().size() - 1);
+        }
+      }
+      long[] newEndKey = new long[endKey.length];
+      System.arraycopy(endKey, 0, newEndKey, 0, endKey.length);
+      endKeyList.add(newEndKey);
+    }
+
+  }
+
+  /**
+   * This API will get the max value of the surrogate key, which will be used for
+   * determining the end key of the particular btree.
+   *
+   * @param dimCardinality
+   * @throws QueryExecutionException
+   */
+  private static long getMaxValue(AbsoluteTableIdentifier tableIdentifier,
+      CarbonDimension carbonDimension, int[] dimCardinality) throws QueryExecutionException {
+    // Get data from all the available slices of the table
+    if (null != dimCardinality) {
+      return dimCardinality[carbonDimension.getKeyOrdinal()];
+    }
+    return -1;
+  }
+
+  /**
+   * @param tableIdentifier
+   * @param carbonDimension
+   * @return
+   * @throws QueryExecutionException
+   */
+  public static Dictionary getForwardDictionaryCache(AbsoluteTableIdentifier tableIdentifier,
+      CarbonDimension carbonDimension) throws QueryExecutionException {
+    DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier =
+        new DictionaryColumnUniqueIdentifier(tableIdentifier.getCarbonTableIdentifier(),
+            carbonDimension.getColumnIdentifier(), carbonDimension.getDataType());
+    CacheProvider cacheProvider = CacheProvider.getInstance();
+    Cache forwardDictionaryCache =
+        cacheProvider.createCache(CacheType.FORWARD_DICTIONARY, tableIdentifier.getStorePath());
+    // get the forward dictionary object
+    Dictionary forwardDictionary = null;
+    try {
+      forwardDictionary = (Dictionary) forwardDictionaryCache.get(dictionaryColumnUniqueIdentifier);
+    } catch (CarbonUtilException e) {
+      throw new QueryExecutionException(e);
+    }
+    return forwardDictionary;
+  }
+
+  public static IndexKey createIndexKeyFromResolvedFilterVal(long[] startOrEndKey,
+      KeyGenerator keyGenerator, byte[] startOrEndKeyForNoDictDimension) {
+    IndexKey indexKey = null;
+    try {
+      indexKey =
+          new IndexKey(keyGenerator.generateKey(startOrEndKey), startOrEndKeyForNoDictDimension);
+    } catch (KeyGenException e) {
+      LOGGER.error(e.getMessage());
+    }
+    return indexKey;
+  }
+
+  /**
+   * API will create a filter executer tree based on the filter resolver
+   *
+   * @param filterExpressionResolverTree
+   * @param segmentProperties
+   * @return
+   */
+  public static FilterExecuter getFilterExecuterTree(
+      FilterResolverIntf filterExpressionResolverTree, SegmentProperties segmentProperties,
+      Map<Integer, GenericQueryType> complexDimensionInfoMap) {
+    return createFilterExecuterTree(filterExpressionResolverTree, segmentProperties,
+        complexDimensionInfoMap);
+  }
+
+  /**
+   * API will prepare the Keys from the surrogates of particular filter resolver
+   *
+   * @param filterValues
+   * @param blockKeyGenerator
+   * @param dimension
+   * @param dimColumnExecuterInfo
+   */
+  public static void prepareKeysFromSurrogates(DimColumnFilterInfo filterValues,
+      KeyGenerator blockKeyGenerator, CarbonDimension dimension,
+      DimColumnExecuterFilterInfo dimColumnExecuterInfo) {
+    byte[][] keysBasedOnFilter = getKeyArray(filterValues, dimension, blockKeyGenerator);
+    dimColumnExecuterInfo.setFilterKeys(keysBasedOnFilter);
+
+  }
+
+  /**
+   * Method will create a default end key in case no end key has been derived
+   * using the existing filter, or in case of non-filter queries.
+   *
+   * @param segmentProperties
+   * @return
+   * @throws KeyGenException
+   */
+  public static IndexKey prepareDefaultEndIndexKey(SegmentProperties segmentProperties)
+      throws KeyGenException {
+    long[] dictionarySurrogateKey =
+        new long[segmentProperties.getDimensions().size() - segmentProperties
+            .getNumberOfNoDictionaryDimension()];
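+    // fill with the max surrogate value so the generated mdkey sorts after every real key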
+    Arrays.fill(dictionarySurrogateKey, Long.MAX_VALUE);
+    IndexKey endIndexKey;
+    byte[] dictionaryendMdkey =
+        segmentProperties.getDimensionKeyGenerator().generateKey(dictionarySurrogateKey);
+    byte[] noDictionaryEndKeyBuffer = getNoDictionaryDefaultEndKey(segmentProperties);
+    endIndexKey = new IndexKey(dictionaryendMdkey, noDictionaryEndKeyBuffer);
+    return endIndexKey;
+  }
+
+  public static byte[] getNoDictionaryDefaultEndKey(SegmentProperties segmentProperties) {
+    // in case of a non-filter query, when no dictionary columns are present we
+    // need to set the default end key; a non-filter query must reach the last
+    // block of the btree, so the max byte value is set in the end key
+    ByteBuffer noDictionaryEndKeyBuffer = ByteBuffer.allocate(
+        (segmentProperties.getNumberOfNoDictionaryDimension()
+            * CarbonCommonConstants.SHORT_SIZE_IN_BYTE) + segmentProperties
+            .getNumberOfNoDictionaryDimension());
+    // end key structure will be
+    //<Offset of first No Dictionary key in 2 Bytes><Offset of second No Dictionary key in 2 Bytes>
+    //<Offset of n No Dictionary key in 2 Bytes><first no dictionary column value>
+    // <second no dictionary column value> <N no dictionary column value>
+    //example if we have 2 no dictionary column
+    //<[0,4,0,5,127,127]>
+    short startPoint = (short) (segmentProperties.getNumberOfNoDictionaryDimension()
+        * CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
+    for (int i = 0; i < segmentProperties.getNumberOfNoDictionaryDimension(); i++) {
+      noDictionaryEndKeyBuffer.putShort((startPoint));
+      startPoint++;
+    }
+    for (int i = 0; i < segmentProperties.getNumberOfNoDictionaryDimension(); i++) {
+      noDictionaryEndKeyBuffer.put((byte) 127);
+    }
+    return noDictionaryEndKeyBuffer.array();
+  }
+
+  /**
+   * Method will create a default start key in case no start key has been
+   * derived using the existing filter, or in case of non-filter queries.
+   *
+   * @param segmentProperties
+   * @return
+   * @throws KeyGenException
+   */
+  public static IndexKey prepareDefaultStartIndexKey(SegmentProperties segmentProperties)
+      throws KeyGenException {
+    IndexKey startIndexKey;
+    long[] dictionarySurrogateKey =
+        new long[segmentProperties.getDimensions().size() - segmentProperties
+            .getNumberOfNoDictionaryDimension()];
+    byte[] dictionaryStartMdkey =
+        segmentProperties.getDimensionKeyGenerator().generateKey(dictionarySurrogateKey);
+    byte[] noDictionaryStartKeyArray = getNoDictionaryDefaultStartKey(segmentProperties);
+
+    startIndexKey = new IndexKey(dictionaryStartMdkey, noDictionaryStartKeyArray);
+    return startIndexKey;
+  }
+
+  public static byte[] getNoDictionaryDefaultStartKey(SegmentProperties segmentProperties) {
+    // in case of a non-filter query, when no dictionary columns are present we
+    // need to set the default start key; a non-filter query must reach the first
+    // block of the btree, so the least byte value is set in the start key
+    ByteBuffer noDictionaryStartKeyBuffer = ByteBuffer.allocate(
+        (segmentProperties.getNumberOfNoDictionaryDimension()
+            * CarbonCommonConstants.SHORT_SIZE_IN_BYTE) + segmentProperties
+            .getNumberOfNoDictionaryDimension());
+    // start key structure will be
+    //<Offset of first No Dictionary key in 2 Bytes><Offset of second No Dictionary key in 2 Bytes>
+    //<Offset of n No Dictionary key in 2 Bytes><first no dictionary column value>
+    // <second no dictionary column value> <N no dictionary column value>
+    //example if we have 2 no dictionary column
+    //<[0,4,0,5,0,0]>
+    short startPoint = (short) (segmentProperties.getNumberOfNoDictionaryDimension()
+        * CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
+    for (int i = 0; i < segmentProperties.getNumberOfNoDictionaryDimension(); i++) {
+      noDictionaryStartKeyBuffer.putShort((startPoint));
+      startPoint++;
+    }
+    for (int i = 0; i < segmentProperties.getNumberOfNoDictionaryDimension(); i++) {
+      noDictionaryStartKeyBuffer.put((byte) 0);
+    }
+    return noDictionaryStartKeyBuffer.array();
+  }
+
+  public static int compareFilterKeyBasedOnDataType(String dictionaryVal, String memberVal,
+      DataType dataType) {
+    try {
+      switch (dataType) {
+        case SHORT:
+          return Short.compare((Short.parseShort(dictionaryVal)), (Short.parseShort(memberVal)));
+        case INT:
+          return Integer.compare((Integer.parseInt(dictionaryVal)), (Integer.parseInt(memberVal)));
+        case DOUBLE:
+          return Double
+              .compare((Double.parseDouble(dictionaryVal)), (Double.parseDouble(memberVal)));
+        case LONG:
+          return Long.compare((Long.parseLong(dictionaryVal)), (Long.parseLong(memberVal)));
+        case BOOLEAN:
+          return Boolean
+              .compare((Boolean.parseBoolean(dictionaryVal)), (Boolean.parseBoolean(memberVal)));
+        case TIMESTAMP:
+          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
+              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
+          Date dateToStr = parser.parse(memberVal);
+          Date dictionaryDate = parser.parse(dictionaryVal);
+          return dictionaryDate.compareTo(dateToStr);
+
+        case DECIMAL:
+          BigDecimal javaDecValForDictVal = new BigDecimal(dictionaryVal);
+          BigDecimal javaDecValForMemberVal = new BigDecimal(memberVal);
+          return javaDecValForDictVal.compareTo(javaDecValForMemberVal);
+        default:
+          return -1;
+      }
+    } catch (Exception e) {
+      return -1;
+    }
+  }
+
+  /**
+   * Method will set the start and end key as per the filter resolver tree;
+   * it utilizes the visitor pattern in order to populate the start and end keys.
+   *
+   * @param segmentProperties
+   * @param tableIdentifier
+   * @param filterResolver
+   * @param listOfStartEndKeys
+   * @throws QueryExecutionException
+   */
+  public static void traverseResolverTreeAndGetStartAndEndKey(SegmentProperties segmentProperties,
+      AbsoluteTableIdentifier tableIdentifier, FilterResolverIntf filterResolver,
+      List<IndexKey> listOfStartEndKeys) throws QueryExecutionException {
+    IndexKey searchStartKey = null;
+    IndexKey searchEndKey = null;
+    long[] startKey = new long[segmentProperties.getDimensionKeyGenerator().getDimCount()];
+    long[] endKey = new long[segmentProperties.getDimensionKeyGenerator().getDimCount()];
+    List<byte[]> listOfStartKeyByteArray =
+        new ArrayList<byte[]>(segmentProperties.getNumberOfNoDictionaryDimension());
+    List<byte[]> listOfEndKeyByteArray =
+        new ArrayList<byte[]>(segmentProperties.getNumberOfNoDictionaryDimension());
+    SortedMap<Integer, byte[]> setOfStartKeyByteArray = new TreeMap<Integer, byte[]>();
+    SortedMap<Integer, byte[]> setOfEndKeyByteArray = new TreeMap<Integer, byte[]>();
+    SortedMap<Integer, byte[]> defaultStartValues = new TreeMap<Integer, byte[]>();
+    SortedMap<Integer, byte[]> defaultEndValues = new TreeMap<Integer, byte[]>();
+    List<long[]> startKeyList = new ArrayList<long[]>();
+    List<long[]> endKeyList = new ArrayList<long[]>();
+    traverseResolverTreeAndPopulateStartAndEndKeys(filterResolver, tableIdentifier,
+        segmentProperties, startKey, setOfStartKeyByteArray, endKey, setOfEndKeyByteArray,
+        startKeyList, endKeyList);
+    if (endKeyList.size() > 0) {
+      //get the new end key from list
+      for (int i = 0; i < endKey.length; i++) {
+        long[] endkeyColumnLevel = new long[endKeyList.size()];
+        int j = 0;
+        for (long[] oneEndKey : endKeyList) {
+          //get each column level end key
+          endkeyColumnLevel[j++] = oneEndKey[i];
+        }
+        Arrays.sort(endkeyColumnLevel);
+        // get the max one as end of this column level
+        endKey[i] = endkeyColumnLevel[endkeyColumnLevel.length - 1];
+      }
+    }
+
+    if (startKeyList.size() > 0) {
+      //get the new start key from list
+      for (int i = 0; i < startKey.length; i++) {
+        long[] startkeyColumnLevel = new long[startKeyList.size()];
+        int j = 0;
+        for (long[] oneStartKey : startKeyList) {
+          //get each column level start key
+          startkeyColumnLevel[j++] = oneStartKey[i];
+        }
+        Arrays.sort(startkeyColumnLevel);
+        // use min - 1 as the start of this column level: for example, if a block contains 5,6
+        // and the filter is 6, that block's start key is 5; without the -1 the block is missed.
+        startKey[i] = startkeyColumnLevel[0] - 1;
+      }
+    }
+
+    fillDefaultStartValue(defaultStartValues, segmentProperties);
+    fillDefaultEndValue(defaultEndValues, segmentProperties);
+    fillNullValuesStartIndexWithDefaultKeys(setOfStartKeyByteArray, segmentProperties);
+    fillNullValuesEndIndexWithDefaultKeys(setOfEndKeyByteArray, segmentProperties);
+    pruneStartAndEndKeys(setOfStartKeyByteArray, listOfStartKeyByteArray);
+    pruneStartAndEndKeys(setOfEndKeyByteArray, listOfEndKeyByteArray);
+
+    searchStartKey = FilterUtil
+        .createIndexKeyFromResolvedFilterVal(startKey, segmentProperties.getDimensionKeyGenerator(),
+            FilterUtil.getKeyWithIndexesAndValues(listOfStartKeyByteArray));
+
+    searchEndKey = FilterUtil
+        .createIndexKeyFromResolvedFilterVal(endKey, segmentProperties.getDimensionKeyGenerator(),
+            FilterUtil.getKeyWithIndexesAndValues(listOfEndKeyByteArray));
+    listOfStartEndKeys.add(searchStartKey);
+    listOfStartEndKeys.add(searchEndKey);
+
+  }
+
+  private static int compareFilterMembersBasedOnActualDataType(String filterMember1,
+      String filterMember2, DataType dataType) {
+    try {
+      switch (dataType) {
+        case SHORT:
+        case INT:
+        case LONG:
+        case DOUBLE:
+
+          if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(filterMember1)) {
+            return 1;
+          }
+          Double d1 = Double.parseDouble(filterMember1);
+          Double d2 = Double.parseDouble(filterMember2);
+          return d1.compareTo(d2);
+        case DECIMAL:
+          if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(filterMember1)) {
+            return 1;
+          }
+          BigDecimal val1 = new BigDecimal(filterMember1);
+          BigDecimal val2 = new BigDecimal(filterMember2);
+          return val1.compareTo(val2);
+        case TIMESTAMP:
+          if (CarbonCommonConstants.MEMBER_DEFAULT_VAL.equals(filterMember1)) {
+            return 1;
+          }
+          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
+              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
+          Date date1 = parser.parse(filterMember1);
+          Date date2 = parser.parse(filterMember2);
+          return date1.compareTo(date2);
+        case STRING:
+        default:
+          return filterMember1.compareTo(filterMember2);
+      }
+    } catch (Exception e) {
+      return -1;
+    }
+  }
+
+  private static void fillNullValuesStartIndexWithDefaultKeys(
+      SortedMap<Integer, byte[]> setOfStartKeyByteArray, SegmentProperties segmentProperties) {
+    List<CarbonDimension> allDimension = segmentProperties.getDimensions();
+    for (CarbonDimension dimension : allDimension) {
+      if (CarbonUtil.hasEncoding(dimension.getEncoder(), Encoding.DICTIONARY)) {
+        continue;
+      }
+      if (null == setOfStartKeyByteArray.get(dimension.getOrdinal())) {
+        setOfStartKeyByteArray.put(dimension.getOrdinal(), new byte[] { 0 });
+      }
+
+    }
+  }
+
+  private static void fillNullValuesEndIndexWithDefaultKeys(
+      SortedMap<Integer, byte[]> setOfEndKeyByteArray, SegmentProperties segmentProperties) {
+    List<CarbonDimension> allDimension = segmentProperties.getDimensions();
+    for (CarbonDimension dimension : allDimension) {
+      if (CarbonUtil.hasEncoding(dimension.getEncoder(), Encoding.DICTIONARY)) {
+        continue;
+      }
+      if (null == setOfEndKeyByteArray.get(dimension.getOrdinal())) {
+        setOfEndKeyByteArray.put(dimension.getOrdinal(), new byte[] { 127 });
+      }
+    }
+  }
+
+  private static void pruneStartAndEndKeys(SortedMap<Integer, byte[]> setOfStartKeyByteArray,
+      List<byte[]> listOfStartKeyByteArray) {
+    for (Map.Entry<Integer, byte[]> entry : setOfStartKeyByteArray.entrySet()) {
+      listOfStartKeyByteArray.add(entry.getValue());
+    }
+  }
+
+  private static void fillDefaultStartValue(SortedMap<Integer, byte[]> setOfStartKeyByteArray,
+      SegmentProperties segmentProperties) {
+    List<CarbonDimension> allDimension = segmentProperties.getDimensions();
+    for (CarbonDimension dimension : allDimension) {
+      if (CarbonUtil.hasEncoding(dimension.getEncoder(), Encoding.DICTIONARY)) {
+        continue;
+      }
+      setOfStartKeyByteArray.put(dimension.getOrdinal(), new byte[] { 0 });
+    }
+
+  }
+
+  private static void fillDefaultEndValue(SortedMap<Integer, byte[]> setOfEndKeyByteArray,
+      SegmentProperties segmentProperties) {
+    List<CarbonDimension> allDimension = segmentProperties.getDimensions();
+    for (CarbonDimension dimension : allDimension) {
+      if (CarbonUtil.hasEncoding(dimension.getEncoder(), Encoding.DICTIONARY)) {
+        continue;
+      }
+      setOfEndKeyByteArray.put(dimension.getOrdinal(), new byte[] { 127 });
+    }
+  }
+
+  private static void traverseResolverTreeAndPopulateStartAndEndKeys(
+      FilterResolverIntf filterResolverTree, AbsoluteTableIdentifier tableIdentifier,
+      SegmentProperties segmentProperties, long[] startKeys,
+      SortedMap<Integer, byte[]> setOfStartKeyByteArray, long[] endKeys,
+      SortedMap<Integer, byte[]> setOfEndKeyByteArray, List<long[]> startKeyList,
+      List<long[]> endKeyList) throws QueryExecutionException {
+    if (null == filterResolverTree) {
+      return;
+    }
+    traverseResolverTreeAndPopulateStartAndEndKeys(filterResolverTree.getLeft(), tableIdentifier,
+        segmentProperties, startKeys, setOfStartKeyByteArray, endKeys, setOfEndKeyByteArray,
+        startKeyList, endKeyList);
+    filterResolverTree.getStartKey(startKeys, setOfStartKeyByteArray, startKeyList);
+    filterResolverTree.getEndKey(segmentProperties, tableIdentifier, endKeys, setOfEndKeyByteArray,
+        endKeyList);
+
+    traverseResolverTreeAndPopulateStartAndEndKeys(filterResolverTree.getRight(), tableIdentifier,
+        segmentProperties, startKeys, setOfStartKeyByteArray, endKeys, setOfEndKeyByteArray,
+        startKeyList, endKeyList);
+  }
+
+  /**
+   * Method will find whether the expression needs to be resolved; this can happen
+   * when the filter is an exclude filter and the literal data type is null
+   * (mainly in the IS NOT NULL filter scenario).
+   *
+   * @param rightExp
+   * @param isIncludeFilter
+   * @return
+   */
+  public static boolean isExpressionNeedsToResolved(Expression rightExp, boolean isIncludeFilter) {
+    if (!isIncludeFilter && rightExp instanceof LiteralExpression
+        && DataType.NULL == ((LiteralExpression) rightExp).getLiteralExpDataType()) {
+      return true;
+    }
+    for (Expression child : rightExp.getChildren()) {
+      if (isExpressionNeedsToResolved(child, isIncludeFilter)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * This method will log the error for an invalid filter member.
+   *
+   * @param e
+   */
+  public static void logError(Throwable e, boolean invalidRowsPresent) {
+    if (!invalidRowsPresent) {
+      invalidRowsPresent = true;
+      LOGGER.error(e, CarbonCommonConstants.FILTER_INVALID_MEMBER + e.getMessage());
+    }
+  }
+
+  /**
+   * This method will return list of all the unknown expressions
+   *
+   * @param expression
+   */
+  public static List<UnknownExpression> getUnknownExpressionsList(Expression expression) {
+    List<UnknownExpression> listOfExp =
+        new ArrayList<UnknownExpression>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    getUnknownExpressionsList(expression, listOfExp);
+    return listOfExp;
+  }
+
+  /**
+   * This method will prepare the list with all unknown expressions
+   *
+   * @param expression
+   * @param lst
+   */
+  private static void getUnknownExpressionsList(Expression expression,
+      List<UnknownExpression> lst) {
+    if (expression instanceof UnknownExpression) {
+      UnknownExpression colExp = (UnknownExpression) expression;
+      lst.add(colExp);
+      return;
+    }
+    for (Expression child : expression.getChildren()) {
+      getUnknownExpressionsList(child, lst);
+    }
+  }
+
+}
\ No newline at end of file

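To make the packed no dictionary key layout above concrete, the following is a
minimal standalone sketch (illustrative only; the class and method names are not
part of this patch) of the format produced by getKeyWithIndexesAndValues and the
default start/end key builders for 1-byte values:

import java.nio.ByteBuffer;
import java.util.Arrays;

public class NoDictionaryKeyLayoutDemo {
  // layout: <2-byte offset of value 1>...<2-byte offset of value n><value 1>...<value n>
  static byte[] pack(byte[][] values) {
    int headerSize = values.length * 2;      // one short offset per value
    int total = headerSize;
    for (byte[] value : values) {
      total += value.length;
    }
    ByteBuffer buffer = ByteBuffer.allocate(total);
    short offset = (short) headerSize;       // first value starts right after the header
    for (byte[] value : values) {
      buffer.putShort(offset);
      offset += value.length;
    }
    for (byte[] value : values) {
      buffer.put(value);
    }
    return buffer.array();
  }

  public static void main(String[] args) {
    // two no dictionary columns holding one zero byte each
    byte[] packed = pack(new byte[][] { { 0 }, { 0 } });
    System.out.println(Arrays.toString(packed)); // [0, 4, 0, 5, 0, 0]
  }
}

The printed array matches the <[0,4,0,5,0,0]> example given in the comments of
getNoDictionaryDefaultStartKey above.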
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/GenericQueryType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/GenericQueryType.java b/core/src/main/java/org/apache/carbondata/scan/filter/GenericQueryType.java
new file mode 100644
index 0000000..881489c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/GenericQueryType.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.filter;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+import org.apache.spark.sql.types.DataType;
+
+public interface GenericQueryType {
+
+  String getName();
+
+  void setName(String name);
+
+  String getParentname();
+
+  void setParentname(String parentname);
+
+  int getBlockIndex();
+
+  void setBlockIndex(int blockIndex);
+
+  void addChildren(GenericQueryType children);
+
+  void getAllPrimitiveChildren(List<GenericQueryType> primitiveChild);
+
+  int getSurrogateIndex();
+
+  void setSurrogateIndex(int surrIndex);
+
+  int getColsCount();
+
+  void setKeySize(int[] keyBlockSize);
+
+  int getKeyOrdinalForQuery();
+
+  void setKeyOrdinalForQuery(int keyOrdinalForQuery);
+
+  void parseBlocksAndReturnComplexColumnByteArray(DimensionColumnDataChunk[] dimensionDataChunks,
+      int rowNumber, DataOutputStream dataOutputStream) throws IOException;
+
+  DataType getSchemaType();
+
+  void parseAndGetResultBytes(ByteBuffer complexData, DataOutputStream dataOutput)
+      throws IOException;
+
+  void fillRequiredBlockData(BlocksChunkHolder blockChunkHolder);
+
+  Object getDataBasedOnDataTypeFromSurrogates(ByteBuffer surrogateData);
+}

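The interface leaves the tree bookkeeping to implementors; the following standalone sketch (hypothetical, not from this commit) shows the usual shape of addChildren and getAllPrimitiveChildren for a struct-like node:

import java.util.ArrayList;
import java.util.List;

public class ComplexTypeNodeDemo {
  static class Node {
    final String name;
    final List<Node> children = new ArrayList<>();
    Node(String name) { this.name = name; }

    void addChildren(Node child) {
      children.add(child);
    }

    // leaves are primitive columns; struct/array nodes recurse
    void getAllPrimitiveChildren(List<Node> primitiveChild) {
      for (Node child : children) {
        if (child.children.isEmpty()) {
          primitiveChild.add(child);
        } else {
          child.getAllPrimitiveChildren(primitiveChild);
        }
      }
    }
  }

  public static void main(String[] args) {
    Node person = new Node("person");
    Node address = new Node("address");
    address.addChildren(new Node("city"));
    address.addChildren(new Node("zip"));
    person.addChildren(new Node("name"));
    person.addChildren(address);
    List<Node> leaves = new ArrayList<>();
    person.getAllPrimitiveChildren(leaves);
    for (Node leaf : leaves) {
      System.out.println(leaf.name);  // name, city, zip
    }
  }
}
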
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/AndFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/AndFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/AndFilterExecuterImpl.java
new file mode 100644
index 0000000..cce4fe2
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/AndFilterExecuterImpl.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import java.util.BitSet;
+
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+public class AndFilterExecuterImpl implements FilterExecuter {
+
+  private FilterExecuter leftExecuter;
+  private FilterExecuter rightExecuter;
+
+  public AndFilterExecuterImpl(FilterExecuter leftExecuter, FilterExecuter rightExecuter) {
+    this.leftExecuter = leftExecuter;
+    this.rightExecuter = rightExecuter;
+  }
+
+  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder)
+      throws FilterUnsupportedException {
+    BitSet leftFilters = leftExecuter.applyFilter(blockChunkHolder);
+    if (leftFilters.isEmpty()) {
+      return leftFilters;
+    }
+    BitSet rightFilter = rightExecuter.applyFilter(blockChunkHolder);
+    if (rightFilter.isEmpty()) {
+      return rightFilter;
+    }
+    leftFilters.and(rightFilter);
+    return leftFilters;
+  }
+
+  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
+    BitSet leftFilters = leftExecuter.isScanRequired(blockMaxValue, blockMinValue);
+    if (leftFilters.isEmpty()) {
+      return leftFilters;
+    }
+    BitSet rightFilter = rightExecuter.isScanRequired(blockMaxValue, blockMinValue);
+    if (rightFilter.isEmpty()) {
+      return rightFilter;
+    }
+    leftFilters.and(rightFilter);
+    return leftFilters;
+  }
+}

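applyFilter above short-circuits: an empty BitSet on either side makes the intersection empty, so the other executer need not be consulted further. A plain-JDK sketch of the in-place intersection it relies on:

import java.util.BitSet;

public class AndShortCircuitDemo {
  public static void main(String[] args) {
    BitSet left = new BitSet(8);
    left.set(1); left.set(3); left.set(5);    // rows matching the left filter
    BitSet right = new BitSet(8);
    right.set(3); right.set(5); right.set(7); // rows matching the right filter
    if (!left.isEmpty() && !right.isEmpty()) {
      left.and(right);                        // intersection, computed in place
    }
    System.out.println(left);                 // {3, 5}
  }
}
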
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/DimColumnExecuterFilterInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/DimColumnExecuterFilterInfo.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/DimColumnExecuterFilterInfo.java
new file mode 100644
index 0000000..a54fb49
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/DimColumnExecuterFilterInfo.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+public class DimColumnExecuterFilterInfo {
+
+  byte[][] filterKeys;
+
+  public void setFilterKeys(byte[][] filterKeys) {
+    this.filterKeys = filterKeys;
+  }
+
+  public byte[][] getFilterKeys() {
+    return filterKeys;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/ExcludeColGroupFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/ExcludeColGroupFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/ExcludeColGroupFilterExecuterImpl.java
new file mode 100644
index 0000000..4ef2c15
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/ExcludeColGroupFilterExecuterImpl.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+import org.apache.carbondata.core.keygenerator.KeyGenerator;
+import org.apache.carbondata.core.util.ByteUtil;
+import org.apache.carbondata.scan.executor.infos.KeyStructureInfo;
+import org.apache.carbondata.scan.executor.util.QueryUtil;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+
+/**
+ * It checks whether a filter is required on the given block and, if required,
+ * does a linear search on the block data and sets the bitset.
+ */
+public class ExcludeColGroupFilterExecuterImpl extends ExcludeFilterExecuterImpl {
+
+  /**
+   * LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(ExcludeColGroupFilterExecuterImpl.class.getName());
+
+  /**
+   * @param dimColResolvedFilterInfo
+   * @param segmentProperties
+   */
+  public ExcludeColGroupFilterExecuterImpl(DimColumnResolvedFilterInfo dimColResolvedFilterInfo,
+      SegmentProperties segmentProperties) {
+    super(dimColResolvedFilterInfo, segmentProperties);
+  }
+
+  /**
+   * It fills the BitSet with the row indexes that match the filter keys.
+   */
+  protected BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
+      int numerOfRows) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    bitSet.flip(0, numerOfRows);
+    try {
+      KeyStructureInfo keyStructureInfo = getKeyStructureInfo();
+      byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+      for (int i = 0; i < filterValues.length; i++) {
+        byte[] filterVal = filterValues[i];
+        for (int rowId = 0; rowId < numerOfRows; rowId++) {
+          byte[] colData = new byte[keyStructureInfo.getMaskByteRanges().length];
+          dimensionColumnDataChunk.fillChunkData(colData, 0, rowId, keyStructureInfo);
+          if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterVal, colData) == 0) {
+            bitSet.flip(rowId);
+          }
+        }
+      }
+
+    } catch (Exception e) {
+      LOGGER.error(e);
+    }
+
+    return bitSet;
+  }
+
+  /**
+   * It is required for extracting column data from a column group chunk.
+   *
+   * @return key structure info for the column group
+   * @throws KeyGenException
+   */
+  private KeyStructureInfo getKeyStructureInfo() throws KeyGenException {
+    int colGrpId = getColumnGroupId(dimColEvaluatorInfo.getColumnIndex());
+    KeyGenerator keyGenerator = segmentProperties.getColumnGroupAndItsKeygenartor().get(colGrpId);
+    List<Integer> mdKeyOrdinal = new ArrayList<Integer>();
+    mdKeyOrdinal.add(getMdkeyOrdinal(dimColEvaluatorInfo.getColumnIndex(), colGrpId));
+    int[] maskByteRanges = QueryUtil.getMaskedByteRangeBasedOrdinal(mdKeyOrdinal, keyGenerator);
+    byte[] maxKey = QueryUtil.getMaxKeyBasedOnOrinal(mdKeyOrdinal, keyGenerator);
+    int[] maskedByte = QueryUtil.getMaskedByte(keyGenerator.getKeySizeInBytes(), maskByteRanges);
+    KeyStructureInfo restructureInfos = new KeyStructureInfo();
+    restructureInfos.setKeyGenerator(keyGenerator);
+    restructureInfos.setMaskByteRanges(maskByteRanges);
+    restructureInfos.setMaxKey(maxKey);
+    restructureInfos.setMaskedBytes(maskedByte);
+    return restructureInfos;
+  }
+
+  /**
+   * Exclude filters cannot be pruned using block min and max values, so a
+   * scan is always required on the given block.
+   */
+  public BitSet isScanRequired(byte[][] blkMaxVal, byte[][] blkMinVal) {
+    BitSet bitSet = new BitSet(1);
+    bitSet.flip(0, 1);
+    return bitSet;
+  }
+
+  private int getMdkeyOrdinal(int ordinal, int colGrpId) {
+    return segmentProperties.getColumnGroupMdKeyOrdinal(colGrpId, ordinal);
+  }
+
+  private int getColumnGroupId(int ordinal) {
+    int[][] columnGroups = segmentProperties.getColumnGroups();
+    int colGrpId = -1;
+    for (int i = 0; i < columnGroups.length; i++) {
+      if (columnGroups[i].length > 1) {
+        colGrpId++;
+        if (QueryUtil.searchInArray(columnGroups[i], ordinal)) {
+          break;
+        }
+      }
+    }
+    return colGrpId;
+  }
+
+  public KeyGenerator getKeyGenerator(int colGrpId) {
+    return segmentProperties.getColumnGroupAndItsKeygenartor().get(colGrpId);
+  }
+}
\ No newline at end of file

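A standalone sketch of the exclude idiom used in getFilteredIndexes above: set every bit, then flip off each row whose value equals a filter key (plain ints stand in for the reconstructed column-group keys):

import java.util.BitSet;

public class ExcludeBitSetDemo {
  public static void main(String[] args) {
    int[] column = {10, 20, 10, 30};
    int filterKey = 10;                       // value to exclude
    BitSet bitSet = new BitSet(column.length);
    bitSet.flip(0, column.length);            // all rows included by default
    for (int rowId = 0; rowId < column.length; rowId++) {
      if (column[rowId] == filterKey) {
        bitSet.flip(rowId);                   // drop the matching row
      }
    }
    System.out.println(bitSet);               // {1, 3}
  }
}
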
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/ExcludeFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/ExcludeFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/ExcludeFilterExecuterImpl.java
new file mode 100644
index 0000000..5ebae79
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/ExcludeFilterExecuterImpl.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import java.util.BitSet;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.impl.FixedLengthDimensionDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.impl.VariableLengthDimensionDataChunk;
+import org.apache.carbondata.core.util.ByteUtil;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.scan.filter.FilterUtil;
+import org.apache.carbondata.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+public class ExcludeFilterExecuterImpl implements FilterExecuter {
+
+  protected DimColumnResolvedFilterInfo dimColEvaluatorInfo;
+  protected DimColumnExecuterFilterInfo dimColumnExecuterInfo;
+  protected SegmentProperties segmentProperties;
+
+  public ExcludeFilterExecuterImpl(DimColumnResolvedFilterInfo dimColEvaluatorInfo,
+      SegmentProperties segmentProperties) {
+    this.dimColEvaluatorInfo = dimColEvaluatorInfo;
+    dimColumnExecuterInfo = new DimColumnExecuterFilterInfo();
+    this.segmentProperties = segmentProperties;
+    FilterUtil.prepareKeysFromSurrogates(dimColEvaluatorInfo.getFilterValues(),
+        segmentProperties.getDimensionKeyGenerator(), dimColEvaluatorInfo.getDimension(),
+        dimColumnExecuterInfo);
+  }
+
+  @Override public BitSet applyFilter(BlocksChunkHolder blockChunkHolder) {
+    int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
+        .get(dimColEvaluatorInfo.getColumnIndex());
+    if (null == blockChunkHolder.getDimensionDataChunk()[blockIndex]) {
+      blockChunkHolder.getDimensionDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
+          .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
+    }
+    return getFilteredIndexes(
+        blockChunkHolder.getDimensionDataChunk()[blockIndex],
+        blockChunkHolder.getDataBlock().nodeSize());
+  }
+
+  protected BitSet getFilteredIndexes(DimensionColumnDataChunk dimColumnDataChunk,
+      int numerOfRows) {
+    // For high cardinality dimensions.
+    if (dimColumnDataChunk.getAttributes().isNoDictionary()
+        && dimColumnDataChunk instanceof VariableLengthDimensionDataChunk) {
+      return setDirectKeyFilterIndexToBitSet((VariableLengthDimensionDataChunk) dimColumnDataChunk,
+          numerOfRows);
+    }
+    if (null != dimColumnDataChunk.getAttributes().getInvertedIndexes()
+        && dimColumnDataChunk instanceof FixedLengthDimensionDataChunk) {
+      return setFilterdIndexToBitSetWithColumnIndex(
+          (FixedLengthDimensionDataChunk) dimColumnDataChunk, numerOfRows);
+    }
+    return setFilterdIndexToBitSet((FixedLengthDimensionDataChunk) dimColumnDataChunk, numerOfRows);
+  }
+
+  private BitSet setDirectKeyFilterIndexToBitSet(
+      VariableLengthDimensionDataChunk dimColumnDataChunk, int numerOfRows) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    bitSet.flip(0, numerOfRows);
+    List<byte[]> listOfColumnarKeyBlockDataForNoDictionaryVal =
+        dimColumnDataChunk.getCompleteDataChunk();
+    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+    int[] columnIndexArray = dimColumnDataChunk.getAttributes().getInvertedIndexes();
+    int[] columnReverseIndexArray = dimColumnDataChunk.getAttributes().getInvertedIndexesReverse();
+    for (int i = 0; i < filterValues.length; i++) {
+      byte[] filterVal = filterValues[i];
+      if (null != listOfColumnarKeyBlockDataForNoDictionaryVal) {
+
+        if (null != columnReverseIndexArray) {
+          for (int index : columnIndexArray) {
+            byte[] noDictionaryVal =
+                listOfColumnarKeyBlockDataForNoDictionaryVal.get(columnReverseIndexArray[index]);
+            if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterVal, noDictionaryVal) == 0) {
+              bitSet.flip(index);
+            }
+          }
+        } else if (null != columnIndexArray) {
+
+          for (int index : columnIndexArray) {
+            byte[] noDictionaryVal =
+                listOfColumnarKeyBlockDataForNoDictionaryVal.get(columnIndexArray[index]);
+            if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterVal, noDictionaryVal) == 0) {
+              bitSet.flip(index);
+            }
+          }
+        } else {
+          for (int index = 0;
+               index < listOfColumnarKeyBlockDataForNoDictionaryVal.size(); index++) {
+            if (ByteUtil.UnsafeComparer.INSTANCE
+                .compareTo(filterVal, listOfColumnarKeyBlockDataForNoDictionaryVal.get(index))
+                == 0) {
+              bitSet.flip(index);
+            }
+          }
+
+        }
+
+      }
+    }
+    return bitSet;
+
+  }
+
+  private BitSet setFilterdIndexToBitSetWithColumnIndex(
+      FixedLengthDimensionDataChunk dimColumnDataChunk, int numerOfRows) {
+    int[] columnIndex = dimColumnDataChunk.getAttributes().getInvertedIndexes();
+    int startKey = 0;
+    int last = 0;
+    int startIndex = 0;
+    BitSet bitSet = new BitSet(numerOfRows);
+    bitSet.flip(0, numerOfRows);
+    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+    for (int i = 0; i < filterValues.length; i++) {
+      startKey = CarbonUtil
+          .getFirstIndexUsingBinarySearch(dimColumnDataChunk, startIndex, numerOfRows - 1,
+              filterValues[i], false);
+      if (startKey < 0) {
+        continue;
+      }
+      bitSet.flip(columnIndex[startKey]);
+      last = startKey;
+      for (int j = startKey + 1; j < numerOfRows; j++) {
+        if (ByteUtil.UnsafeComparer.INSTANCE
+            .compareTo(dimColumnDataChunk.getCompleteDataChunk(), j * filterValues[i].length,
+                filterValues[i].length, filterValues[i], 0, filterValues[i].length) == 0) {
+          bitSet.flip(columnIndex[j]);
+          last++;
+        } else {
+          break;
+        }
+      }
+      startIndex = last;
+      if (startIndex >= numerOfRows) {
+        break;
+      }
+    }
+    return bitSet;
+  }
+
+  private BitSet setFilterdIndexToBitSet(FixedLengthDimensionDataChunk dimColumnDataChunk,
+      int numerOfRows) {
+    BitSet bitSet = new BitSet(numerOfRows);
+    bitSet.flip(0, numerOfRows);
+    byte[][] filterValues = dimColumnExecuterInfo.getFilterKeys();
+    for (int k = 0; k < filterValues.length; k++) {
+      for (int j = 0; j < numerOfRows; j++) {
+        if (ByteUtil.UnsafeComparer.INSTANCE
+            .compareTo(dimColumnDataChunk.getCompleteDataChunk(), j * filterValues[k].length,
+                filterValues[k].length, filterValues[k], 0, filterValues[k].length) == 0) {
+          bitSet.flip(j);
+        }
+      }
+    }
+    return bitSet;
+  }
+
+  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
+    BitSet bitSet = new BitSet(1);
+    bitSet.flip(0, 1);
+    return bitSet;
+  }
+}

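setFilterdIndexToBitSet above treats row j as the keyLen bytes starting at offset j * keyLen; a standalone sketch of that ranged comparison, with Arrays.equals (Java 9+) standing in for ByteUtil.UnsafeComparer:

import java.util.Arrays;
import java.util.BitSet;

public class FixedLengthExcludeDemo {
  public static void main(String[] args) {
    byte[] chunk = {1, 1, 2, 2, 1, 1};        // three 2-byte rows
    byte[] filterKey = {1, 1};
    int keyLen = filterKey.length;
    int rows = chunk.length / keyLen;
    BitSet bitSet = new BitSet(rows);
    bitSet.flip(0, rows);                     // exclude semantics: all rows set
    for (int rowId = 0; rowId < rows; rowId++) {
      int off = rowId * keyLen;
      if (Arrays.equals(chunk, off, off + keyLen, filterKey, 0, keyLen)) {
        bitSet.flip(rowId);                   // row matches a key, exclude it
      }
    }
    System.out.println(bitSet);               // {1}
  }
}
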
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/filter/executer/FilterExecuter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/filter/executer/FilterExecuter.java b/core/src/main/java/org/apache/carbondata/scan/filter/executer/FilterExecuter.java
new file mode 100644
index 0000000..29a72da
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/filter/executer/FilterExecuter.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.filter.executer;
+
+import java.util.BitSet;
+
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+
+public interface FilterExecuter {
+
+  /**
+   * API will apply the filter based on the resolver instance.
+   *
+   * @return BitSet of row indexes selected by the filter
+   * @throws FilterUnsupportedException
+   */
+  BitSet applyFilter(BlocksChunkHolder blocksChunkHolder) throws FilterUnsupportedException;
+
+  /**
+   * API will verify whether the block can be shortlisted based on the block's
+   * max and min key values.
+   *
+   * @param blockMaxValue maximum key value of the block
+   * @param blockMinValue minimum key value of the block
+   * @return BitSet
+   */
+  BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue);
+}


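Exclude executers, as seen above, always answer that a scan is required; include-style executers can prune a block when every filter key lies outside [blockMinValue, blockMaxValue]. A hedged sketch of that pruning test, assuming unsigned lexicographic byte order (Arrays.compareUnsigned requires Java 9+):

import java.util.Arrays;

public class MinMaxPruneDemo {
  public static void main(String[] args) {
    byte[] blockMin = {10};
    byte[] blockMax = {20};
    byte[] filterKey = {30};
    // a scan is required only if min <= key <= max for some filter key
    boolean scanRequired =
        Arrays.compareUnsigned(filterKey, blockMin) >= 0
            && Arrays.compareUnsigned(filterKey, blockMax) <= 0;
    System.out.println(scanRequired);  // false: the block can be skipped
  }
}
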
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/Expression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/Expression.java b/core/src/main/java/org/apache/carbondata/scan/expression/Expression.java
new file mode 100644
index 0000000..a1f2bcf
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/Expression.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public abstract class Expression implements Serializable {
+
+  private static final long serialVersionUID = -7568676723039530713L;
+  protected List<Expression> children =
+      new ArrayList<Expression>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+
+  public abstract ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException;
+
+  public abstract ExpressionType getFilterExpressionType();
+
+  public List<Expression> getChildren() {
+    return children;
+  }
+
+  public abstract String getString();
+
+  // public abstract void  accept(ExpressionVisitor visitor);
+}

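A minimal concrete subclass only needs evaluate, getFilterExpressionType and getString. The sketch below is hypothetical: it always evaluates to boolean true and reuses ExpressionType.LITERAL as the nearest available type tag:

import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
import org.apache.carbondata.scan.expression.Expression;
import org.apache.carbondata.scan.expression.ExpressionResult;
import org.apache.carbondata.scan.filter.intf.ExpressionType;
import org.apache.carbondata.scan.filter.intf.RowIntf;

// Hypothetical leaf that evaluates to boolean true regardless of the row.
public class AlwaysTrueExpression extends Expression {

  private static final long serialVersionUID = 1L;

  @Override public ExpressionResult evaluate(RowIntf value) {
    return new ExpressionResult(DataType.BOOLEAN, true);
  }

  @Override public ExpressionType getFilterExpressionType() {
    return ExpressionType.LITERAL;  // assumed nearest tag in this commit
  }

  @Override public String getString() {
    return "AlwaysTrue()";
  }
}
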
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/ExpressionResult.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/ExpressionResult.java b/core/src/main/java/org/apache/carbondata/scan/expression/ExpressionResult.java
new file mode 100644
index 0000000..267f685
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/ExpressionResult.java
@@ -0,0 +1,472 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression;
+
+import java.math.BigDecimal;
+import java.sql.Timestamp;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+
+public class ExpressionResult implements Comparable<ExpressionResult> {
+
+  private static final long serialVersionUID = 1L;
+  protected DataType dataType;
+
+  protected Object value;
+
+  private List<ExpressionResult> expressionResults;
+
+  public ExpressionResult(DataType dataType, Object value) {
+    this.dataType = dataType;
+    this.value = value;
+  }
+
+  public ExpressionResult(List<ExpressionResult> expressionResults) {
+    this.expressionResults = expressionResults;
+  }
+
+  public void set(DataType dataType, Object value) {
+    this.dataType = dataType;
+    this.value = value;
+    this.expressionResults = null;
+  }
+
+  public DataType getDataType() {
+    return dataType;
+  }
+
+  //CHECKSTYLE:OFF Approval No:Approval-V1R2C10_009
+  public Integer getInt() throws FilterIllegalMemberException {
+    if (value == null) {
+      return null;
+    }
+    try {
+      switch (this.getDataType()) {
+        case STRING:
+          try {
+            return Integer.parseInt(value.toString());
+          } catch (NumberFormatException e) {
+            throw new FilterIllegalMemberException(e);
+          }
+        case SHORT:
+          return ((Short) value).intValue();
+        case INT:
+        case DOUBLE:
+          if (value instanceof Double) {
+            return ((Double) value).intValue();
+          }
+          return (Integer) value;
+        case TIMESTAMP:
+          if (value instanceof Timestamp) {
+            return (int) (((Timestamp) value).getTime() % 1000);
+          } else {
+            return (Integer) value;
+          }
+        default:
+          throw new FilterIllegalMemberException(
+              "Cannot convert " + this.getDataType().name() + " to integer type value");
+      }
+
+    } catch (ClassCastException e) {
+      throw new FilterIllegalMemberException(
+          "Cannot convert " + this.getDataType().name() + " to Integer type value");
+    }
+  }
+
+  public Short getShort() throws FilterIllegalMemberException {
+    if (value == null) {
+      return null;
+    }
+    try {
+      switch (this.getDataType()) {
+        case STRING:
+          try {
+            return Short.parseShort(value.toString());
+          } catch (NumberFormatException e) {
+            throw new FilterIllegalMemberException(e);
+          }
+        case SHORT:
+        case INT:
+        case DOUBLE:
+
+          if (value instanceof Double) {
+            return ((Double) value).shortValue();
+          } else if (value instanceof Integer) {
+            return ((Integer) value).shortValue();
+          }
+          return (Short) value;
+
+        case TIMESTAMP:
+
+          if (value instanceof Timestamp) {
+            return (short) (((Timestamp) value).getTime() % 1000);
+          } else {
+            return (Short) value;
+          }
+
+        default:
+          throw new FilterIllegalMemberException(
+              "Cannot convert " + this.getDataType().name() + " to short type value");
+      }
+
+    } catch (ClassCastException e) {
+      throw new FilterIllegalMemberException(
+          "Cannot convert " + this.getDataType().name() + " to Short type value");
+    }
+  }
+
+  public String getString() throws FilterIllegalMemberException {
+    if (value == null) {
+      return null;
+    }
+    try {
+      switch (this.getDataType()) {
+        case TIMESTAMP:
+          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
+              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
+          if (value instanceof Timestamp) {
+            return parser.format((Timestamp) value);
+          } else {
+            return parser.format(new Timestamp((long) value / 1000));
+          }
+
+        default:
+          return value.toString();
+      }
+    } catch (Exception e) {
+      throw new FilterIllegalMemberException(
+          "Cannot convert " + this.getDataType().name() + " to String type value");
+    }
+  }
+
+  public Double getDouble() throws FilterIllegalMemberException {
+    if (value == null) {
+      return null;
+    }
+    try {
+      switch (this.getDataType()) {
+        case STRING:
+          try {
+            return Double.parseDouble(value.toString());
+          } catch (NumberFormatException e) {
+            throw new FilterIllegalMemberException(e);
+          }
+        case SHORT:
+          return ((Short) value).doubleValue();
+        case INT:
+          return ((Integer) value).doubleValue();
+        case LONG:
+          return ((Long) value).doubleValue();
+        case DOUBLE:
+          return (Double) value;
+        case TIMESTAMP:
+          if (value instanceof Timestamp) {
+            return (double) ((Timestamp) value).getTime() * 1000;
+          } else {
+            return (Double) (value);
+          }
+        default:
+          throw new FilterIllegalMemberException(
+              "Cannot convert " + this.getDataType().name() + " to double type value");
+      }
+    } catch (ClassCastException e) {
+      throw new FilterIllegalMemberException(
+          "Cannot convert " + this.getDataType().name() + " to Double type value");
+    }
+  }
+  //CHECKSTYLE:ON
+
+  public Long getLong() throws FilterIllegalMemberException {
+    if (value == null) {
+      return null;
+    }
+    try {
+      switch (this.getDataType()) {
+        case STRING:
+          try {
+            return Long.parseLong(value.toString());
+          } catch (NumberFormatException e) {
+            throw new FilterIllegalMemberException(e);
+          }
+        case SHORT:
+          return ((Short) value).longValue();
+        case INT:
+          return ((Integer) value).longValue();
+        case LONG:
+          return (Long) value;
+        case DOUBLE:
+          return ((Double) value).longValue();
+        case TIMESTAMP:
+          if (value instanceof Timestamp) {
+            return 1000 * ((Timestamp) value).getTime();
+          } else {
+            return (Long) value;
+          }
+        default:
+          throw new FilterIllegalMemberException(
+              "Cannot convert " + this.getDataType().name() + " to Long type value");
+      }
+    } catch (ClassCastException e) {
+      throw new FilterIllegalMemberException(
+          "Cannot convert " + this.getDataType().name() + " to Long type value");
+    }
+
+  }
+
+  // Conversion support for BigDecimal values.
+  public BigDecimal getDecimal() throws FilterIllegalMemberException {
+    if (value == null) {
+      return null;
+    }
+    try {
+      switch (this.getDataType()) {
+        case STRING:
+          try {
+            return new BigDecimal(value.toString());
+          } catch (NumberFormatException e) {
+            throw new FilterIllegalMemberException(e);
+          }
+        case SHORT:
+          return new BigDecimal((short) value);
+        case INT:
+          return new BigDecimal((int) value);
+        case LONG:
+          return new BigDecimal((long) value);
+        case DOUBLE:
+          return new BigDecimal(value.toString());
+        case DECIMAL:
+          return new BigDecimal(value.toString());
+        case TIMESTAMP:
+          if (value instanceof Timestamp) {
+            return new BigDecimal(1000 * ((Timestamp) value).getTime());
+          } else {
+            return new BigDecimal((long) value);
+          }
+        default:
+          throw new FilterIllegalMemberException(
+              "Cannot convert " + this.getDataType().name() + " to Decimal type value");
+      }
+    } catch (ClassCastException e) {
+      throw new FilterIllegalMemberException(
+          "Cannot convert " + this.getDataType().name() + " to Decimal type value");
+    }
+
+  }
+
+  public Long getTime() throws FilterIllegalMemberException {
+    if (value == null) {
+      return null;
+    }
+    try {
+      switch (this.getDataType()) {
+        case STRING:
+          // Currently the query engine layer only supports the
+          // yyyy-MM-dd HH:mm:ss date format, regardless of the format in
+          // which the data is stored. So while retrieving the direct
+          // surrogate value for a filter member, the value must first be
+          // parsed as a date in the above format and then converted to a
+          // timestamp.
+          SimpleDateFormat parser =
+              new SimpleDateFormat(CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT);
+          Date dateToStr;
+          try {
+            dateToStr = parser.parse(value.toString());
+            return dateToStr.getTime() * 1000;
+          } catch (ParseException e) {
+            throw new FilterIllegalMemberException(
+                "Cannot convert " + this.getDataType().name() + " to Time/Long type value");
+          }
+        case SHORT:
+          return ((Short) value).longValue();
+        case INT:
+          return ((Integer) value).longValue();
+        case LONG:
+          return (Long) value;
+        case DOUBLE:
+          return ((Double) value).longValue();
+        case TIMESTAMP:
+          if (value instanceof Timestamp) {
+            return ((Timestamp) value).getTime() * 1000;
+          } else {
+            return (Long) value;
+          }
+        default:
+          throw new FilterIllegalMemberException(
+              "Cannot convert " + this.getDataType().name() + " to Time/Long type value");
+      }
+    } catch (ClassCastException e) {
+      throw new FilterIllegalMemberException(
+          "Cannot convert " + this.getDataType().name() + " to Time/Long type value");
+    }
+
+  }
+
+  public Boolean getBoolean() throws FilterIllegalMemberException {
+    if (value == null) {
+      return null;
+    }
+    try {
+      switch (this.getDataType()) {
+        case STRING:
+        case BOOLEAN:
+          // Boolean.parseBoolean never throws; invalid input yields false
+          return Boolean.parseBoolean(value.toString());
+
+        default:
+          throw new FilterIllegalMemberException(
+              "Cannot convert " + this.getDataType().name() + " to boolean type value");
+      }
+    } catch (ClassCastException e) {
+      throw new FilterIllegalMemberException(
+          "Cannot convert " + this.getDataType().name() + " to Boolean type value");
+    }
+  }
+
+  public List<ExpressionResult> getList() {
+    if (null == expressionResults) {
+      List<ExpressionResult> a = new ArrayList<ExpressionResult>(20);
+      a.add(new ExpressionResult(dataType, value));
+      return a;
+    } else {
+      return expressionResults;
+    }
+  }
+
+  public List<String> getListAsString() throws FilterIllegalMemberException {
+    List<String> evaluateResultListFinal = new ArrayList<String>(20);
+    List<ExpressionResult> evaluateResultList = getList();
+    for (ExpressionResult result : evaluateResultList) {
+      if (result.getString() == null) {
+        evaluateResultListFinal.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL);
+        continue;
+      }
+      evaluateResultListFinal.add(result.getString());
+    }
+    return evaluateResultListFinal;
+  }
+
+  @Override public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    if (null != expressionResults) {
+      result = prime * result + expressionResults.hashCode();
+    } else if (null != value) {
+      result = prime * result + value.toString().hashCode();
+    } else {
+      result = prime * result + "".hashCode();
+    }
+
+    return result;
+  }
+
+  @Override public boolean equals(Object obj) {
+    if (!(obj instanceof ExpressionResult)) {
+      return false;
+    }
+    if (this == obj) {
+      return true;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    ExpressionResult objToCompare = (ExpressionResult) obj;
+    boolean result = false;
+    if (this.value == objToCompare.value) {
+      return true;
+    }
+    try {
+      switch (this.getDataType()) {
+        case STRING:
+          result = this.getString().equals(objToCompare.getString());
+          break;
+        case SHORT:
+          result = this.getShort().equals(objToCompare.getShort());
+          break;
+        case INT:
+          result = this.getInt().equals(objToCompare.getInt());
+          break;
+        case LONG:
+        case TIMESTAMP:
+          result = this.getLong().equals(objToCompare.getLong());
+          break;
+        case DOUBLE:
+          result = this.getDouble().equals(objToCompare.getDouble());
+          break;
+        case DECIMAL:
+          result = this.getDecimal().equals(objToCompare.getDecimal());
+          break;
+        default:
+          break;
+      }
+    } catch (FilterIllegalMemberException ex) {
+      return false;
+    }
+
+    return result;
+  }
+
+  public boolean isNull() {
+    return value == null;
+  }
+
+  @Override public int compareTo(ExpressionResult o) {
+    try {
+      switch (o.dataType) {
+        case SHORT:
+        case INT:
+        case LONG:
+        case DOUBLE:
+          Double d1 = this.getDouble();
+          Double d2 = o.getDouble();
+          return d1.compareTo(d2);
+        case DECIMAL:
+          java.math.BigDecimal val1 = this.getDecimal();
+          java.math.BigDecimal val2 = o.getDecimal();
+          return val1.compareTo(val2);
+        case TIMESTAMP:
+          SimpleDateFormat parser = new SimpleDateFormat(CarbonProperties.getInstance()
+              .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+                  CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT));
+          Date date1 = null;
+          Date date2 = null;
+          date1 = parser.parse(this.getString());
+          date2 = parser.parse(o.getString());
+          return date1.compareTo(date2);
+        case STRING:
+        default:
+          return this.getString().compareTo(o.getString());
+      }
+    } catch (Exception e) {
+      return -1;
+    }
+  }
+
+}

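The getter methods above coerce lazily and surface conversion failures as FilterIllegalMemberException; a short usage sketch using classes from this commit:

import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
import org.apache.carbondata.scan.expression.ExpressionResult;
import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;

public class ExpressionResultDemo {
  public static void main(String[] args) throws FilterIllegalMemberException {
    ExpressionResult r = new ExpressionResult(DataType.STRING, "42");
    System.out.println(r.getInt());     // 42, parsed on demand
    System.out.println(r.getDouble());  // 42.0
    ExpressionResult bad = new ExpressionResult(DataType.STRING, "abc");
    try {
      bad.getInt();
    } catch (FilterIllegalMemberException e) {
      System.out.println("not a number: " + e.getMessage());
    }
  }
}
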
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/LeafExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/LeafExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/LeafExpression.java
new file mode 100644
index 0000000..25c9ae6
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/LeafExpression.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression;
+
+public abstract class LeafExpression extends Expression {
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/LiteralExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/LiteralExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/LiteralExpression.java
new file mode 100644
index 0000000..671c209
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/LiteralExpression.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class LiteralExpression extends LeafExpression {
+
+  private static final long serialVersionUID = 1L;
+  private Object value;
+  private DataType dataType;
+
+  public LiteralExpression(Object value, DataType dataType) {
+    this.value = value;
+    this.dataType = dataType;
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value) {
+    ExpressionResult expressionResult = new ExpressionResult(dataType, this.value);
+    return expressionResult;
+  }
+
+  public ExpressionResult getExpressionResult() {
+    ExpressionResult expressionResult = new ExpressionResult(dataType, this.value);
+    return expressionResult;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.LITERAL;
+  }
+
+  @Override public String getString() {
+    return "LiteralExpression(" + value + ')';
+  }
+
+  /**
+   * Returns the data type of this literal expression.
+   *
+   * @return the literal's DataType
+   */
+  public DataType getLiteralExpDataType() {
+    return dataType;
+  }
+
+}

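Because a literal ignores the incoming row, it can be evaluated with a null row; a minimal sketch:

import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
import org.apache.carbondata.scan.expression.ExpressionResult;
import org.apache.carbondata.scan.expression.LiteralExpression;

public class LiteralExpressionDemo {
  public static void main(String[] args) throws Exception {
    LiteralExpression five = new LiteralExpression(5, DataType.INT);
    ExpressionResult result = five.evaluate(null);  // the row is unused
    System.out.println(result.getInt());            // 5
  }
}
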
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/UnaryExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/UnaryExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/UnaryExpression.java
new file mode 100644
index 0000000..64be9a5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/UnaryExpression.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression;
+
+public abstract class UnaryExpression extends Expression {
+
+  private static final long serialVersionUID = 1L;
+  protected Expression child;
+
+  public UnaryExpression(Expression child) {
+    this.child = child;
+    children.add(child);
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/UnknownExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/UnknownExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/UnknownExpression.java
new file mode 100644
index 0000000..01d813d
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/UnknownExpression.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression;
+
+import java.util.List;
+
+public abstract class UnknownExpression extends Expression {
+
+  public abstract List<ColumnExpression> getAllColumnList();
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/AddExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/AddExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/AddExpression.java
new file mode 100644
index 0000000..5c5b2cd
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/AddExpression.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.arithmetic;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class AddExpression extends BinaryArithmeticExpression {
+  private static final long serialVersionUID = 7999436055420911612L;
+
+  public AddExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ExpressionResult addExprLeftRes = left.evaluate(value);
+    ExpressionResult addExprRightRes = right.evaluate(value);
+    ExpressionResult val1 = addExprLeftRes;
+    ExpressionResult val2 = addExprRightRes;
+    if (addExprLeftRes.isNull() || addExprRightRes.isNull()) {
+      addExprLeftRes.set(addExprLeftRes.getDataType(), null);
+      return addExprLeftRes;
+    }
+
+    if (addExprLeftRes.getDataType() != addExprRightRes.getDataType()) {
+      if (addExprLeftRes.getDataType().getPresedenceOrder() < addExprRightRes.getDataType()
+          .getPresedenceOrder()) {
+        val2 = addExprLeftRes;
+        val1 = addExprRightRes;
+      }
+    }
+    switch (val1.getDataType()) {
+      case STRING:
+      case DOUBLE:
+        addExprRightRes.set(DataType.DOUBLE, val1.getDouble() + val2.getDouble());
+        break;
+      case SHORT:
+        addExprRightRes.set(DataType.SHORT, val1.getShort() + val2.getShort());
+        break;
+      case INT:
+        addExprRightRes.set(DataType.INT, val1.getInt() + val2.getInt());
+        break;
+      case LONG:
+        addExprRightRes.set(DataType.LONG, val1.getLong() + val2.getLong());
+        break;
+      case DECIMAL:
+        addExprRightRes.set(DataType.DECIMAL, val1.getDecimal().add(val2.getDecimal()));
+        break;
+      default:
+        throw new FilterUnsupportedException(
+            "Incompatible datatype for applying Add Expression Filter " + val1.getDataType());
+    }
+    return addExprRightRes;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.ADD;
+  }
+
+  @Override public String getString() {
+    return "Add(" + left.getString() + ',' + right.getString() + ',';
+  }
+}

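When operand types differ, the lower-precedence result is promoted before the switch. A sketch, assuming DOUBLE outranks INT in getPresedenceOrder():

import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
import org.apache.carbondata.scan.expression.ExpressionResult;
import org.apache.carbondata.scan.expression.LiteralExpression;
import org.apache.carbondata.scan.expression.arithmetic.AddExpression;

public class AddExpressionDemo {
  public static void main(String[] args) throws Exception {
    AddExpression add = new AddExpression(
        new LiteralExpression(2, DataType.INT),
        new LiteralExpression(3.5, DataType.DOUBLE));
    ExpressionResult sum = add.evaluate(null);  // literals ignore the row
    System.out.println(sum.getDouble());        // 5.5, INT promoted to double
  }
}
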
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/BinaryArithmeticExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/BinaryArithmeticExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/BinaryArithmeticExpression.java
new file mode 100644
index 0000000..3ccd92e
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/BinaryArithmeticExpression.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.arithmetic;
+
+import org.apache.carbondata.scan.expression.BinaryExpression;
+import org.apache.carbondata.scan.expression.Expression;
+
+public abstract class BinaryArithmeticExpression extends BinaryExpression {
+
+  private static final long serialVersionUID = 1L;
+
+  public BinaryArithmeticExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/DivideExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/DivideExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/DivideExpression.java
new file mode 100644
index 0000000..a6b1085
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/DivideExpression.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.arithmetic;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class DivideExpression extends BinaryArithmeticExpression {
+  private static final long serialVersionUID = -7269266926782365612L;
+
+  public DivideExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ExpressionResult divideExprLeftRes = left.evaluate(value);
+    ExpressionResult divideExprRightRes = right.evaluate(value);
+    if (divideExprLeftRes.isNull() || divideExprRightRes.isNull()) {
+      divideExprLeftRes.set(divideExprLeftRes.getDataType(), null);
+      return divideExprLeftRes;
+    }
+    // promote to the operand type with the higher precedence order, but keep
+    // the left/right operand order intact: division is not commutative
+    DataType resultType = divideExprLeftRes.getDataType();
+    if (divideExprLeftRes.getDataType().getPresedenceOrder() < divideExprRightRes.getDataType()
+        .getPresedenceOrder()) {
+      resultType = divideExprRightRes.getDataType();
+    }
+    switch (resultType) {
+      case STRING:
+      case DOUBLE:
+        divideExprRightRes.set(DataType.DOUBLE,
+            divideExprLeftRes.getDouble() / divideExprRightRes.getDouble());
+        break;
+      case SHORT:
+        divideExprRightRes.set(DataType.SHORT,
+            divideExprLeftRes.getShort() / divideExprRightRes.getShort());
+        break;
+      case INT:
+        divideExprRightRes.set(DataType.INT,
+            divideExprLeftRes.getInt() / divideExprRightRes.getInt());
+        break;
+      case LONG:
+        divideExprRightRes.set(DataType.LONG,
+            divideExprLeftRes.getLong() / divideExprRightRes.getLong());
+        break;
+      case DECIMAL:
+        divideExprRightRes.set(DataType.DECIMAL,
+            divideExprLeftRes.getDecimal().divide(divideExprRightRes.getDecimal()));
+        break;
+      default:
+        throw new FilterUnsupportedException(
+            "Incompatible datatype for applying Divide Expression Filter " + divideExprLeftRes
+                .getDataType());
+    }
+    return divideExprRightRes;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.DIVIDE;
+  }
+
+  @Override public String getString() {
+    return "Divide(" + left.getString() + ',' + right.getString() + ')';
+  }
+}

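For readers skimming the hunk above, a minimal usage sketch of the promotion rule in DivideExpression.evaluate(): when the operand types differ, getPresedenceOrder() picks the wider type, so an INT divided by a DOUBLE is computed as a DOUBLE. LiteralExpression and RowImpl are assumed helpers from the same scan packages (they are not part of this hunk), so treat this as an illustrative sketch rather than the committed API:

    import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
    import org.apache.carbondata.scan.expression.ExpressionResult;
    import org.apache.carbondata.scan.expression.LiteralExpression;          // assumed helper
    import org.apache.carbondata.scan.expression.arithmetic.DivideExpression;
    import org.apache.carbondata.scan.filter.intf.RowImpl;                   // assumed helper

    public class DivideExpressionSketch {
      public static void main(String[] args) throws Exception {
        DivideExpression divide = new DivideExpression(
            new LiteralExpression(10, DataType.INT),        // promoted to DOUBLE
            new LiteralExpression(4.0, DataType.DOUBLE));
        // Literal operands ignore the row contents, so an empty row suffices.
        ExpressionResult result = divide.evaluate(new RowImpl());
        System.out.println(result.getDouble());             // 2.5
      }
    }
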
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/MultiplyExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/MultiplyExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/MultiplyExpression.java
new file mode 100644
index 0000000..4aaf40c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/MultiplyExpression.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.arithmetic;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class MultiplyExpression extends BinaryArithmeticExpression {
+  private static final long serialVersionUID = 1L;
+
+  public MultiplyExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ExpressionResult multiplyExprLeftRes = left.evaluate(value);
+    ExpressionResult multiplyExprRightRes = right.evaluate(value);
+    ExpressionResult val1 = multiplyExprLeftRes;
+    ExpressionResult val2 = multiplyExprRightRes;
+    if (multiplyExprLeftRes.isNull() || multiplyExprRightRes.isNull()) {
+      multiplyExprLeftRes.set(multiplyExprLeftRes.getDataType(), null);
+      return multiplyExprLeftRes;
+    }
+
+    if (multiplyExprLeftRes.getDataType() != multiplyExprRightRes.getDataType()) {
+      if (multiplyExprLeftRes.getDataType().getPresedenceOrder() < multiplyExprRightRes
+          .getDataType().getPresedenceOrder()) {
+        val2 = multiplyExprLeftRes;
+        val1 = multiplyExprRightRes;
+      }
+    }
+    switch (val1.getDataType()) {
+      case STRING:
+      case DOUBLE:
+        multiplyExprRightRes.set(DataType.DOUBLE, val1.getDouble() * val2.getDouble());
+        break;
+      case SHORT:
+        multiplyExprRightRes.set(DataType.SHORT, val1.getShort() * val2.getShort());
+        break;
+      case INT:
+        multiplyExprRightRes.set(DataType.INT, val1.getInt() * val2.getInt());
+        break;
+      case LONG:
+        multiplyExprRightRes.set(DataType.LONG, val1.getLong() * val2.getLong());
+        break;
+      case DECIMAL:
+        multiplyExprRightRes.set(DataType.DECIMAL, val1.getDecimal().multiply(val2.getDecimal()));
+        break;
+      default:
+        throw new FilterUnsupportedException(
+            "Incompatible datatype for applying Multiply Expression Filter " + multiplyExprLeftRes
+                .getDataType());
+    }
+    return multiplyExprRightRes;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.MULTIPLY;
+  }
+
+  @Override public String getString() {
+    return "Multiply(" + left.getString() + ',' + right.getString() + ')';
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/SubstractExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/SubstractExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/SubstractExpression.java
new file mode 100644
index 0000000..ca16484
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/arithmetic/SubstractExpression.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.arithmetic;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class SubstractExpression extends BinaryArithmeticExpression {
+
+  private static final long serialVersionUID = -8304726440185363102L;
+
+  public SubstractExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ExpressionResult subtractExprLeftRes = left.evaluate(value);
+    ExpressionResult subtractExprRightRes = right.evaluate(value);
+    if (subtractExprLeftRes.isNull() || subtractExprRightRes.isNull()) {
+      subtractExprLeftRes.set(subtractExprLeftRes.getDataType(), null);
+      return subtractExprLeftRes;
+    }
+    // promote to the operand type with the higher precedence order, but keep
+    // the left/right operand order intact: subtraction is not commutative
+    DataType resultType = subtractExprLeftRes.getDataType();
+    if (subtractExprLeftRes.getDataType().getPresedenceOrder() < subtractExprRightRes
+        .getDataType().getPresedenceOrder()) {
+      resultType = subtractExprRightRes.getDataType();
+    }
+    switch (resultType) {
+      case STRING:
+      case DOUBLE:
+        subtractExprRightRes.set(DataType.DOUBLE,
+            subtractExprLeftRes.getDouble() - subtractExprRightRes.getDouble());
+        break;
+      case SHORT:
+        subtractExprRightRes.set(DataType.SHORT,
+            subtractExprLeftRes.getShort() - subtractExprRightRes.getShort());
+        break;
+      case INT:
+        subtractExprRightRes.set(DataType.INT,
+            subtractExprLeftRes.getInt() - subtractExprRightRes.getInt());
+        break;
+      case LONG:
+        subtractExprRightRes.set(DataType.LONG,
+            subtractExprLeftRes.getLong() - subtractExprRightRes.getLong());
+        break;
+      case DECIMAL:
+        subtractExprRightRes.set(DataType.DECIMAL,
+            subtractExprLeftRes.getDecimal().subtract(subtractExprRightRes.getDecimal()));
+        break;
+      default:
+        throw new FilterUnsupportedException(
+            "Incompatible datatype for applying Substract Expression Filter " + subtractExprLeftRes
+                .getDataType());
+    }
+    return subtractExprRightRes;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.SUBSTRACT;
+  }
+
+  @Override public String getString() {
+    return "Substract(" + left.getString() + ',' + right.getString() + ')';
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/conditional/BinaryConditionalExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/conditional/BinaryConditionalExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/BinaryConditionalExpression.java
new file mode 100644
index 0000000..3d7e15e
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/BinaryConditionalExpression.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.conditional;
+
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.logical.BinaryLogicalExpression;
+
+public abstract class BinaryConditionalExpression extends BinaryLogicalExpression
+    implements ConditionalExpression {
+
+  /**
+   * default serial version ID
+   */
+  private static final long serialVersionUID = 1L;
+
+  public BinaryConditionalExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/conditional/ConditionalExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/conditional/ConditionalExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/ConditionalExpression.java
new file mode 100644
index 0000000..a87231f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/ConditionalExpression.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.conditional;
+
+import java.util.List;
+
+import org.apache.carbondata.scan.expression.ColumnExpression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+
+public interface ConditionalExpression {
+
+  // Returns the column information involved in the expression,
+  // gathered by traversing the tree
+  List<ColumnExpression> getColumnList();
+
+  boolean isSingleDimension();
+
+  List<ExpressionResult> getLiterals();
+
+  /**
+   * will return whether the columns involved are direct dictionary columns
+   *
+   * @return true if the columns are direct dictionary columns
+   */
+  boolean isDirectDictionaryColumns();
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/conditional/EqualToExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/conditional/EqualToExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/EqualToExpression.java
new file mode 100644
index 0000000..12a3e32
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/EqualToExpression.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.conditional;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class EqualToExpression extends BinaryConditionalExpression {
+
+  private static final long serialVersionUID = 1L;
+  private boolean isNull;
+
+  public EqualToExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  public EqualToExpression(Expression left, Expression right, boolean isNull) {
+    super(left, right);
+    this.isNull = isNull;
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ExpressionResult elRes = left.evaluate(value);
+    ExpressionResult erRes = right.evaluate(value);
+
+    boolean result = false;
+
+    ExpressionResult val1 = elRes;
+    ExpressionResult val2 = erRes;
+
+    if (elRes.isNull() || erRes.isNull()) {
+      if (isNull) {
+        elRes.set(DataType.BOOLEAN, elRes.isNull() == erRes.isNull());
+      } else {
+        elRes.set(DataType.BOOLEAN, false);
+      }
+      return elRes;
+    }
+    // default implementation if the data types differ between the two results
+    if (elRes.getDataType() != erRes.getDataType()) {
+      if (elRes.getDataType().getPresedenceOrder() < erRes.getDataType().getPresedenceOrder()) {
+        val2 = elRes;
+        val1 = erRes;
+      }
+    }
+
+    switch (val1.getDataType()) {
+      case STRING:
+        result = val1.getString().equals(val2.getString());
+        break;
+      case SHORT:
+        result = val1.getShort().equals(val2.getShort());
+        break;
+      case INT:
+        result = val1.getInt().equals(val2.getInt());
+        break;
+      case DOUBLE:
+        result = val1.getDouble().equals(val2.getDouble());
+        break;
+      case TIMESTAMP:
+        result = val1.getTime().equals(val2.getTime());
+        break;
+      case LONG:
+        result = val1.getLong().equals(val2.getLong());
+        break;
+      case DECIMAL:
+        result = val1.getDecimal().compareTo(val2.getDecimal()) == 0;
+        break;
+      default:
+        throw new FilterUnsupportedException(
+            "DataType: " + val1.getDataType() + " not supported for the filter expression");
+    }
+    val1.set(DataType.BOOLEAN, result);
+    return val1;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.EQUALS;
+  }
+
+  @Override public String getString() {
+    return "EqualTo(" + left.getString() + ',' + right.getString() + ')';
+  }
+
+}

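A note on the two EqualToExpression constructors above: the three-argument form enables null-safe equality, under which two null operands compare as equal, while the two-argument form treats any null operand as a non-match. A minimal sketch under the same assumptions as before (LiteralExpression and RowImpl helpers, plus an ExpressionResult.getBoolean() accessor):

    import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
    import org.apache.carbondata.scan.expression.LiteralExpression;          // assumed helper
    import org.apache.carbondata.scan.expression.conditional.EqualToExpression;
    import org.apache.carbondata.scan.filter.intf.RowImpl;                   // assumed helper

    public class EqualToSketch {
      public static void main(String[] args) throws Exception {
        LiteralExpression nullLiteral = new LiteralExpression(null, DataType.INT);
        RowImpl row = new RowImpl();
        // plain equality: null = null evaluates to false
        System.out.println(new EqualToExpression(nullLiteral, nullLiteral)
            .evaluate(row).getBoolean());
        // null-safe equality (isNull flag set): null <=> null evaluates to true
        System.out.println(new EqualToExpression(nullLiteral, nullLiteral, true)
            .evaluate(row).getBoolean());
      }
    }
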
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/conditional/GreaterThanEqualToExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/conditional/GreaterThanEqualToExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/GreaterThanEqualToExpression.java
new file mode 100644
index 0000000..4b9b8ff
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/GreaterThanEqualToExpression.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.conditional;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class GreaterThanEqualToExpression extends BinaryConditionalExpression {
+  private static final long serialVersionUID = 4185317066280688984L;
+
+  public GreaterThanEqualToExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ExpressionResult elRes = left.evaluate(value);
+    ExpressionResult erRes = right.evaluate(value);
+    ExpressionResult exprResVal1 = elRes;
+    if (elRes.isNull() || erRes.isNull()) {
+      elRes.set(DataType.BOOLEAN, false);
+      return elRes;
+    }
+    if (elRes.getDataType() != erRes.getDataType()) {
+      if (elRes.getDataType().getPresedenceOrder() < erRes.getDataType().getPresedenceOrder()) {
+        exprResVal1 = erRes;
+      }
+
+    }
+    boolean result = false;
+    switch (exprResVal1.getDataType()) {
+      case STRING:
+        result = elRes.getString().compareTo(erRes.getString()) >= 0;
+        break;
+      case SHORT:
+        result = elRes.getShort() >= (erRes.getShort());
+        break;
+      case INT:
+        result = elRes.getInt() >= (erRes.getInt());
+        break;
+      case DOUBLE:
+        result = elRes.getDouble() >= (erRes.getDouble());
+        break;
+      case TIMESTAMP:
+        result = elRes.getTime() >= (erRes.getTime());
+        break;
+      case LONG:
+        result = elRes.getLong() >= (erRes.getLong());
+        break;
+      case DECIMAL:
+        result = elRes.getDecimal().compareTo(erRes.getDecimal()) >= 0;
+        break;
+      default:
+        throw new FilterUnsupportedException(
+            "DataType: " + exprResVal1.getDataType() + " not supported for the filter expression");
+    }
+    exprResVal1.set(DataType.BOOLEAN, result);
+    return exprResVal1;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.GREATERTHAN_EQUALTO;
+  }
+
+  @Override public String getString() {
+    return "GreaterThanEqualTo(" + left.getString() + ',' + right.getString() + ')';
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/conditional/GreaterThanExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/conditional/GreaterThanExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/GreaterThanExpression.java
new file mode 100644
index 0000000..d4e8ad8
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/GreaterThanExpression.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.conditional;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+
+public class GreaterThanExpression extends BinaryConditionalExpression {
+  private static final long serialVersionUID = -5319109756575539219L;
+
+  public GreaterThanExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ExpressionResult exprLeftRes = left.evaluate(value);
+    ExpressionResult exprRightRes = right.evaluate(value);
+    ExpressionResult val1 = exprLeftRes;
+    if (exprLeftRes.isNull() || exprRightRes.isNull()) {
+      exprLeftRes.set(DataType.BOOLEAN, false);
+      return exprLeftRes;
+    }
+    if (exprLeftRes.getDataType() != exprRightRes.getDataType()) {
+      if (exprLeftRes.getDataType().getPresedenceOrder() < exprRightRes.getDataType()
+          .getPresedenceOrder()) {
+        val1 = exprRightRes;
+      }
+
+    }
+    boolean result = false;
+    switch (val1.getDataType()) {
+      case STRING:
+        result = exprLeftRes.getString().compareTo(exprRightRes.getString()) > 0;
+        break;
+      case DOUBLE:
+        result = exprLeftRes.getDouble() > (exprRightRes.getDouble());
+        break;
+      case SHORT:
+        result = exprLeftRes.getShort() > (exprRightRes.getShort());
+        break;
+      case INT:
+        result = exprLeftRes.getInt() > (exprRightRes.getInt());
+        break;
+      case TIMESTAMP:
+        result = exprLeftRes.getTime() > (exprRightRes.getTime());
+        break;
+      case LONG:
+        result = exprLeftRes.getLong() > (exprRightRes.getLong());
+        break;
+      case DECIMAL:
+        result = exprLeftRes.getDecimal().compareTo(exprRightRes.getDecimal()) > 0;
+        break;
+      default:
+        throw new FilterUnsupportedException(
+            "DataType: " + val1.getDataType() + " not supported for the filter expression");
+    }
+    val1.set(DataType.BOOLEAN, result);
+    return val1;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.GREATERTHAN;
+  }
+
+  @Override public String getString() {
+    return "GreaterThan(" + left.getString() + ',' + right.getString() + ')';
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/conditional/InExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/conditional/InExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/InExpression.java
new file mode 100644
index 0000000..33888c9
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/InExpression.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.conditional;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class InExpression extends BinaryConditionalExpression {
+  private static final long serialVersionUID = -3149927446694175489L;
+
+  protected transient Set<ExpressionResult> setOfExprResult;
+
+  public InExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ExpressionResult leftRsult = left.evaluate(value);
+
+    if (setOfExprResult == null) {
+      ExpressionResult rightRsult = right.evaluate(value);
+      ExpressionResult val = null;
+      setOfExprResult = new HashSet<ExpressionResult>(10);
+      for (ExpressionResult expressionResVal : rightRsult.getList()) {
+        if (expressionResVal.getDataType().getPresedenceOrder() < leftRsult.getDataType()
+            .getPresedenceOrder()) {
+          val = leftRsult;
+        } else {
+          val = expressionResVal;
+        }
+        switch (val.getDataType()) {
+          case STRING:
+            val = new ExpressionResult(val.getDataType(), expressionResVal.getString());
+            break;
+          case SHORT:
+            val = new ExpressionResult(val.getDataType(), expressionResVal.getShort());
+            break;
+          case INT:
+            val = new ExpressionResult(val.getDataType(), expressionResVal.getInt());
+            break;
+          case DOUBLE:
+            val = new ExpressionResult(val.getDataType(), expressionResVal.getDouble());
+            break;
+          case LONG:
+            val = new ExpressionResult(val.getDataType(), expressionResVal.getLong());
+            break;
+          case TIMESTAMP:
+            val = new ExpressionResult(val.getDataType(), expressionResVal.getTime());
+            break;
+          case DECIMAL:
+            val = new ExpressionResult(val.getDataType(), expressionResVal.getDecimal());
+            break;
+          default:
+            throw new FilterUnsupportedException(
+                "DataType: " + val.getDataType() + " not supported for the filter expression");
+        }
+        setOfExprResult.add(val);
+      }
+    }
+    leftRsult.set(DataType.BOOLEAN, setOfExprResult.contains(leftRsult));
+    return leftRsult;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.IN;
+  }
+
+  @Override public String getString() {
+    return "IN(" + left.getString() + ',' + right.getString() + ')';
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/conditional/LessThanEqualToExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/conditional/LessThanEqualToExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/LessThanEqualToExpression.java
new file mode 100644
index 0000000..0e18db9
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/LessThanEqualToExpression.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.conditional;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class LessThanEqualToExpression extends BinaryConditionalExpression {
+  private static final long serialVersionUID = 1L;
+
+  public LessThanEqualToExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ExpressionResult elRes = left.evaluate(value);
+    ExpressionResult erRes = right.evaluate(value);
+    ExpressionResult exprResValue1 = elRes;
+    if (elRes.isNull() || erRes.isNull()) {
+      elRes.set(DataType.BOOLEAN, false);
+      return elRes;
+    }
+    if (elRes.getDataType() != erRes.getDataType()) {
+      if (elRes.getDataType().getPresedenceOrder() < erRes.getDataType().getPresedenceOrder()) {
+        exprResValue1 = erRes;
+      }
+
+    }
+    boolean result = false;
+    switch (exprResValue1.getDataType()) {
+      case STRING:
+        result = elRes.getString().compareTo(erRes.getString()) <= 0;
+        break;
+      case SHORT:
+        result = elRes.getShort() <= (erRes.getShort());
+        break;
+      case INT:
+        result = elRes.getInt() <= (erRes.getInt());
+        break;
+      case DOUBLE:
+        result = elRes.getDouble() <= (erRes.getDouble());
+        break;
+      case TIMESTAMP:
+        result = elRes.getTime() <= (erRes.getTime());
+        break;
+      case LONG:
+        result = elRes.getLong() <= (erRes.getLong());
+        break;
+      case DECIMAL:
+        result = elRes.getDecimal().compareTo(erRes.getDecimal()) <= 0;
+        break;
+      default:
+        throw new FilterUnsupportedException("DataType: " + exprResValue1.getDataType()
+            + " not supported for the filter expression");
+    }
+    exprResValue1.set(DataType.BOOLEAN, result);
+    return exprResValue1;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.LESSTHAN_EQUALTO;
+  }
+
+  @Override public String getString() {
+    return "LessThanEqualTo(" + left.getString() + ',' + right.getString() + ')';
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/conditional/LessThanExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/conditional/LessThanExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/LessThanExpression.java
new file mode 100644
index 0000000..8873d72
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/LessThanExpression.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.conditional;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class LessThanExpression extends BinaryConditionalExpression {
+
+  private static final long serialVersionUID = 6343040416663699924L;
+
+  public LessThanExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ExpressionResult erRes = right.evaluate(value);
+    ExpressionResult elRes = left.evaluate(value);
+
+    ExpressionResult val1 = elRes;
+
+    boolean result = false;
+
+    if (elRes.isNull() || erRes.isNull()) {
+      elRes.set(DataType.BOOLEAN, false);
+      return elRes;
+    }
+    if (elRes.getDataType() != erRes.getDataType()) {
+      if (elRes.getDataType().getPresedenceOrder() < erRes.getDataType().getPresedenceOrder()) {
+        val1 = erRes;
+      }
+
+    }
+    switch (val1.getDataType()) {
+      case STRING:
+        result = elRes.getString().compareTo(erRes.getString()) < 0;
+        break;
+      case SHORT:
+        result = elRes.getShort() < (erRes.getShort());
+        break;
+      case INT:
+        result = elRes.getInt() < (erRes.getInt());
+        break;
+      case DOUBLE:
+        result = elRes.getDouble() < (erRes.getDouble());
+        break;
+      case TIMESTAMP:
+        result = elRes.getTime() < (erRes.getTime());
+        break;
+      case LONG:
+        result = elRes.getLong() < (erRes.getLong());
+        break;
+      case DECIMAL:
+        result = elRes.getDecimal().compareTo(erRes.getDecimal()) < 0;
+        break;
+      default:
+        throw new FilterUnsupportedException(
+            "DataType: " + val1.getDataType() + " not supported for the filter expression");
+    }
+    val1.set(DataType.BOOLEAN, result);
+    return val1;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.LESSTHAN;
+  }
+
+  @Override public String getString() {
+    return "LessThan(" + left.getString() + ',' + right.getString() + ')';
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/conditional/ListExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/conditional/ListExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/ListExpression.java
new file mode 100644
index 0000000..e57d48a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/ListExpression.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.conditional;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class ListExpression extends Expression {
+  private static final long serialVersionUID = 1L;
+
+  public ListExpression(List<Expression> children) {
+    this.children = children;
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value) throws FilterUnsupportedException {
+    List<ExpressionResult> listOfExprRes = new ArrayList<ExpressionResult>(10);
+
+    for (Expression expr : children) {
+      try {
+        listOfExprRes.add(expr.evaluate(value));
+      } catch (FilterIllegalMemberException e) {
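+        // this member cannot be interpreted in the comparison type; skip it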
+        continue;
+      }
+    }
+    return new ExpressionResult(listOfExprRes);
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.LIST;
+  }
+
+  @Override public String getString() {
+    StringBuilder value = new StringBuilder("ListExpression(");
+    for (Expression child : children) {
+      value.append(child.getString()).append(';');
+    }
+    return value.append(')').toString();
+  }
+
+}

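Combining ListExpression with the InExpression earlier in this hunk, a minimal sketch of assembling an IN filter. The candidate side is a literal so the snippet stays self-contained (a ColumnExpression would be usual), and LiteralExpression, RowImpl and ExpressionResult.getBoolean() remain assumptions:

    import java.util.Arrays;

    import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
    import org.apache.carbondata.scan.expression.Expression;
    import org.apache.carbondata.scan.expression.LiteralExpression;          // assumed helper
    import org.apache.carbondata.scan.expression.conditional.InExpression;
    import org.apache.carbondata.scan.expression.conditional.ListExpression;
    import org.apache.carbondata.scan.filter.intf.RowImpl;                   // assumed helper

    public class InFilterSketch {
      public static void main(String[] args) throws Exception {
        Expression candidate = new LiteralExpression("b", DataType.STRING);
        ListExpression members = new ListExpression(Arrays.<Expression>asList(
            new LiteralExpression("a", DataType.STRING),
            new LiteralExpression("b", DataType.STRING)));
        InExpression in = new InExpression(candidate, members);
        // The first evaluate() builds the cached HashSet of members; later
        // calls reuse it, and membership relies on ExpressionResult equality.
        System.out.println(in.evaluate(new RowImpl()).getBoolean());         // true
      }
    }
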
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/conditional/NotEqualsExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/conditional/NotEqualsExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/NotEqualsExpression.java
new file mode 100644
index 0000000..40f761b
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/NotEqualsExpression.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.conditional;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class NotEqualsExpression extends BinaryConditionalExpression {
+
+  private static final long serialVersionUID = 8684006025540863973L;
+  private boolean isNotNull = false;
+  public NotEqualsExpression(Expression left, Expression right, boolean isNotNull) {
+    super(left, right);
+    this.isNotNull = isNotNull;
+  }
+
+  public NotEqualsExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ExpressionResult elRes = left.evaluate(value);
+    ExpressionResult erRes = right.evaluate(value);
+
+    boolean result = false;
+    ExpressionResult val1 = elRes;
+    ExpressionResult val2 = erRes;
+    if (elRes.isNull() || erRes.isNull()) {
+      if (isNotNull) {
+        elRes.set(DataType.BOOLEAN, elRes.isNull() != erRes.isNull());
+      } else {
+        elRes.set(DataType.BOOLEAN, false);
+      }
+      return elRes;
+    }
+    // default implementation if the data types differ between the two results
+    if (elRes.getDataType() != erRes.getDataType()) {
+      if (elRes.getDataType().getPresedenceOrder() < erRes.getDataType().getPresedenceOrder()) {
+        val1 = erRes;
+        val2 = elRes;
+      }
+    }
+    switch (val1.getDataType()) {
+      case STRING:
+        result = !val1.getString().equals(val2.getString());
+        break;
+      case SHORT:
+        result = val1.getShort().shortValue() != val2.getShort().shortValue();
+        break;
+      case INT:
+        result = val1.getInt().intValue() != val2.getInt().intValue();
+        break;
+      case DOUBLE:
+        result = val1.getDouble().doubleValue() != val2.getDouble().doubleValue();
+        break;
+      case TIMESTAMP:
+        result = val1.getTime().longValue() != val2.getTime().longValue();
+        break;
+      case LONG:
+        result = val1.getLong().longValue() != val2.getLong().longValue();
+        break;
+      case DECIMAL:
+        result = val1.getDecimal().compareTo(val2.getDecimal()) != 0;
+        break;
+      default:
+        throw new FilterUnsupportedException(
+            "DataType: " + val1.getDataType() + " not supported for the filter expression");
+    }
+    val1.set(DataType.BOOLEAN, result);
+    return val1;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.NOT_EQUALS;
+  }
+
+  @Override public String getString() {
+    return "NotEquals(" + left.getString() + ',' + right.getString() + ')';
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/conditional/NotInExpression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/conditional/NotInExpression.java b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/NotInExpression.java
new file mode 100644
index 0000000..89a4b3c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/conditional/NotInExpression.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.conditional;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType;
+import org.apache.carbondata.scan.expression.Expression;
+import org.apache.carbondata.scan.expression.ExpressionResult;
+import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.intf.ExpressionType;
+import org.apache.carbondata.scan.filter.intf.RowIntf;
+
+public class NotInExpression extends BinaryConditionalExpression {
+  private static final long serialVersionUID = -6835841923752118034L;
+  protected transient Set<ExpressionResult> setOfExprResult;
+
+  public NotInExpression(Expression left, Expression right) {
+    super(left, right);
+  }
+
+  @Override public ExpressionResult evaluate(RowIntf value)
+      throws FilterUnsupportedException, FilterIllegalMemberException {
+    ExpressionResult leftRsult = left.evaluate(value);
+    if (setOfExprResult == null) {
+      ExpressionResult val = null;
+      ExpressionResult rightRsult = right.evaluate(value);
+      setOfExprResult = new HashSet<ExpressionResult>(10);
+      for (ExpressionResult exprResVal : rightRsult.getList()) {
+        if (exprResVal.getDataType().getPresedenceOrder() < leftRsult.getDataType()
+            .getPresedenceOrder()) {
+          val = leftRsult;
+        } else {
+          val = exprResVal;
+        }
+        switch (val.getDataType()) {
+          case STRING:
+            val = new ExpressionResult(val.getDataType(), exprResVal.getString());
+            break;
+          case SHORT:
+            val = new ExpressionResult(val.getDataType(), exprResVal.getShort());
+            break;
+          case INT:
+            val = new ExpressionResult(val.getDataType(), exprResVal.getInt());
+            break;
+          case DOUBLE:
+            val = new ExpressionResult(val.getDataType(), exprResVal.getDouble());
+            break;
+          case TIMESTAMP:
+            val = new ExpressionResult(val.getDataType(), exprResVal.getTime());
+            break;
+          case LONG:
+            val = new ExpressionResult(val.getDataType(), exprResVal.getLong());
+            break;
+          case DECIMAL:
+            val = new ExpressionResult(val.getDataType(), exprResVal.getDecimal());
+            break;
+          default:
+            throw new FilterUnsupportedException(
+                "DataType: " + val.getDataType() + " not supported for the filter expression");
+        }
+        setOfExprResult.add(val);
+      }
+    }
+    leftRsult.set(DataType.BOOLEAN, !setOfExprResult.contains(leftRsult));
+
+    return leftRsult;
+  }
+
+  @Override public ExpressionType getFilterExpressionType() {
+    return ExpressionType.NOT_IN;
+  }
+
+  @Override public String getString() {
+    return "NOT IN(" + left.getString() + ',' + right.getString() + ')';
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/expression/exception/FilterIllegalMemberException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/expression/exception/FilterIllegalMemberException.java b/core/src/main/java/org/apache/carbondata/scan/expression/exception/FilterIllegalMemberException.java
new file mode 100644
index 0000000..f75aaed
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/expression/exception/FilterIllegalMemberException.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.expression.exception;
+
+import java.util.Locale;
+
+/**
+ * FilterIllegalMemberException represents an exception which can occur while evaluating
+ * filter members; it needs to be handled gracefully, without propagating to the outer layer,
+ * so that execution is not interrupted.
+ */
+public class FilterIllegalMemberException extends Exception {
+
+  /**
+   * default serial version ID.
+   */
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * The Error message.
+   */
+  private String msg = "";
+
+  /**
+   * Constructor
+   *
+   * @param msg The error message for this exception.
+   */
+  public FilterIllegalMemberException(String msg) {
+    super(msg);
+    this.msg = msg;
+  }
+
+  /**
+   * Constructor
+   *
+   * @param msg The error message for this exception.
+   * @param t   The cause of this exception.
+   */
+  public FilterIllegalMemberException(String msg, Throwable t) {
+    super(msg, t);
+    this.msg = msg;
+  }
+
+  /**
+   * Constructor
+   *
+   * @param t The cause of this exception.
+   */
+  public FilterIllegalMemberException(Throwable t) {
+    super(t);
+  }
+
+  /**
+   * This method is used to get the localized message.
+   *
+   * @param locale - A Locale object represents a specific geographical,
+   *               political, or cultural region.
+   * @return - Localized error message (currently an empty string).
+   */
+  public String getLocalizedMessage(Locale locale) {
+    return "";
+  }
+
+  /**
+   * getLocalizedMessage
+   */
+  @Override public String getLocalizedMessage() {
+    return super.getLocalizedMessage();
+  }
+
+  /**
+   * getMessage
+   */
+  public String getMessage() {
+    return this.msg;
+  }
+
+}
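
To close this group of files, a hedged sketch of the contract FilterIllegalMemberException encodes: evaluation code raises it when a single member value cannot be interpreted in the target type, and callers (as ListExpression.evaluate above does) catch it and skip that member rather than failing the whole query. The parseAsDouble helper is hypothetical:

    import org.apache.carbondata.scan.expression.exception.FilterIllegalMemberException;

    public class MemberParseSketch {
      // hypothetical helper: report a bad member per value, not per query
      static double parseAsDouble(String member) throws FilterIllegalMemberException {
        try {
          return Double.parseDouble(member);
        } catch (NumberFormatException e) {
          throw new FilterIllegalMemberException(
              "Cannot convert value " + member + " to Double type", e);
        }
      }

      public static void main(String[] args) {
        for (String member : new String[] { "1.5", "not-a-number", "2.5" }) {
          try {
            System.out.println(parseAsDouble(member));
          } catch (FilterIllegalMemberException e) {
            // graceful handling: skip the illegal member and continue
          }
        }
      }
    }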