Posted to commits@carbondata.apache.org by ja...@apache.org on 2017/02/24 05:38:45 UTC

[4/5] incubator-carbondata git commit: WIP Added code for new V3 format to optimize scan

WIP Added code for new V3 format to optimize scan

Fixed test cases

Fixed style

Fixed issue

Added read-ahead blocklet PR to it

Fixed style

Refactored code

Added read-ahead blocklet

Optimized decoder

Updated code of V3 format interfaces

Optimized greater-than and less-than filters

Fixed column group queries

Refactored V1 format with new interface

Fixed complex query

Fixed comments


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/72cb415a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/72cb415a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/72cb415a

Branch: refs/heads/master
Commit: 72cb415a1e1126882c38ecfead01dc6b7bb4cc07
Parents: 766671c
Author: ravipesala <ra...@gmail.com>
Authored: Fri Feb 3 16:11:06 2017 +0530
Committer: jackylk <ja...@huawei.com>
Committed: Fri Feb 24 13:34:40 2017 +0800

----------------------------------------------------------------------
 .../AbstractColumnDictionaryInfo.java           |  18 +-
 .../core/cache/dictionary/Dictionary.java       |  13 ++
 .../cache/dictionary/ForwardDictionary.java     |  13 ++
 .../cache/dictionary/ReverseDictionary.java     |  13 ++
 .../core/constants/CarbonCommonConstants.java   |  12 +-
 .../carbondata/core/datastore/DataRefNode.java  |  12 +-
 .../carbondata/core/datastore/FileHolder.java   |  14 ++
 .../datastore/chunk/AbstractRawColumnChunk.java | 124 +++++++++++
 .../chunk/impl/DimensionRawColumnChunk.java     | 105 +++++++++
 .../chunk/impl/MeasureRawColumnChunk.java       | 107 +++++++++
 .../reader/DimensionColumnChunkReader.java      |  20 +-
 .../chunk/reader/MeasureColumnChunkReader.java  |  20 +-
 ...mpressedDimensionChunkFileBasedReaderV1.java |  92 +++++---
 ...mpressedDimensionChunkFileBasedReaderV2.java | 215 +++++++++----------
 ...CompressedMeasureChunkFileBasedReaderV1.java |  49 +++--
 ...CompressedMeasureChunkFileBasedReaderV2.java | 169 +++++++--------
 .../core/datastore/columnar/UnBlockIndexer.java |   7 +-
 .../core/datastore/impl/DFSFileHolderImpl.java  |   9 +
 .../core/datastore/impl/FileHolderImpl.java     |   8 +
 .../impl/btree/AbstractBTreeLeafNode.java       |  12 +-
 .../datastore/impl/btree/BTreeNonLeafNode.java  |  12 +-
 .../impl/btree/BlockletBTreeLeafNode.java       |  20 +-
 .../core/metadata/blocklet/BlockletInfo.java    |  32 ++-
 .../DictionaryBasedVectorResultCollector.java   |  54 +++--
 .../core/scan/complextypes/ArrayQueryType.java  |  16 +-
 .../scan/complextypes/ComplexQueryType.java     |  13 +-
 .../scan/complextypes/PrimitiveQueryType.java   |  12 +-
 .../core/scan/complextypes/StructQueryType.java |  10 +-
 .../executor/impl/AbstractQueryExecutor.java    |  19 +-
 .../scan/executor/infos/BlockExecutionInfo.java |  27 +++
 .../core/scan/executor/util/QueryUtil.java      |  13 +-
 .../carbondata/core/scan/filter/FilterUtil.java |   5 -
 .../core/scan/filter/GenericQueryType.java      |   6 +-
 .../filter/executer/AndFilterExecuterImpl.java  |  12 +-
 .../executer/ExcludeFilterExecuterImpl.java     |  32 ++-
 .../scan/filter/executer/FilterExecuter.java    |   9 +-
 .../executer/IncludeFilterExecuterImpl.java     |  54 ++++-
 .../filter/executer/OrFilterExecuterImpl.java   |  11 +-
 .../executer/RestructureFilterExecuterImpl.java |  52 -----
 .../executer/RowLevelFilterExecuterImpl.java    | 192 +++++++++--------
 .../RowLevelRangeGrtThanFiterExecuterImpl.java  |  69 ++++--
 ...elRangeGrtrThanEquaToFilterExecuterImpl.java |  65 ++++--
 ...velRangeLessThanEqualFilterExecuterImpl.java |  67 ++++--
 .../RowLevelRangeLessThanFiterExecuterImpl.java |  66 ++++--
 .../RowLevelRangeFilterResolverImpl.java        |  34 ++-
 .../processor/AbstractDataBlockIterator.java    | 117 ++++++++--
 .../core/scan/processor/BlocksChunkHolder.java  |  51 +++--
 .../processor/impl/DataBlockIteratorImpl.java   |   8 +-
 .../core/scan/result/AbstractScannedResult.java | 113 +++++++---
 .../result/impl/FilterQueryScannedResult.java   |  25 ++-
 .../AbstractDetailQueryResultIterator.java      |  21 +-
 .../iterator/DetailQueryResultIterator.java     |  48 +----
 .../scan/scanner/AbstractBlockletScanner.java   | 106 ++++++---
 .../core/scan/scanner/BlockletScanner.java      |  20 ++
 .../core/scan/scanner/impl/FilterScanner.java   | 172 ++++++++++-----
 .../scan/scanner/impl/NonFilterScanner.java     |   3 -
 .../carbondata/core/util/BitSetGroup.java       |  82 +++++++
 .../apache/carbondata/core/util/CarbonUtil.java |  36 ++--
 .../core/util/DataFileFooterConverter2.java     |  13 +-
 .../carbondata/core/util/DataTypeUtil.java      |  82 +++++++
 .../carbondata/core/util/DataTypeUtilTest.java  |   2 +-
 .../scanner/impl/FilterScannerTest.java         |   2 +-
 .../readsupport/SparkRowReadSupportImpl.java    |   2 +-
 .../spark/sql/CarbonDictionaryDecoder.scala     |  17 +-
 .../spark/sql/CarbonDictionaryDecoder.scala     |   9 +-
 .../sql/optimizer/CarbonLateDecodeRule.scala    |   1 +
 66 files changed, 2025 insertions(+), 839 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java
index f02e6b5..18f4885 100644
--- a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java
@@ -240,12 +240,28 @@ public abstract class AbstractColumnDictionaryInfo implements DictionaryInfo {
     byte[] dictionaryValueInBytes = getDictionaryBytesFromSurrogate(surrogateKey);
     if (null != dictionaryValueInBytes) {
       dictionaryValue = new String(dictionaryValueInBytes,
-          Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
+          CarbonCommonConstants.DEFAULT_CHARSET_CLASS);
     }
     return dictionaryValue;
   }
 
   /**
+   * This method will find and return the dictionary value in bytes for a given surrogate key.
+   * Applicable scenarios:
+   * 1. Query final result preparation: while converting surrogate keys in the final
+   * result back to the original dictionary values, this method will be used
+   *
+   * @param surrogateKey a unique ID for a dictionary value
+   * @return value if found else null
+   */
+  @Override public byte[] getDictionaryValueForKeyInBytes(int surrogateKey) {
+    if (surrogateKey < MINIMUM_SURROGATE_KEY) {
+      return null;
+    }
+    return getDictionaryBytesFromSurrogate(surrogateKey);
+  }
+
+  /**
    * This method will find and return the dictionary value as byte array for a
    * given surrogate key
    *

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/Dictionary.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/Dictionary.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/Dictionary.java
index 8a74040..7302de2 100644
--- a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/Dictionary.java
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/Dictionary.java
@@ -59,6 +59,19 @@ public interface Dictionary {
   String getDictionaryValueForKey(int surrogateKey);
 
   /**
+   * This method will find and return the dictionary value for a given surrogate key in bytes.
+   * It is the same as getDictionaryValueForKey but it does not convert the bytes to a String;
+   * it returns the bytes directly. Callers can convert to a String with new String(bytes).
+   * Applicable scenarios:
+   * 1. Query final result preparation: while converting surrogate keys in the final
+   * result back to the original dictionary values, this method will be used
+   *
+   * @param surrogateKey a unique ID for a dictionary value
+   * @return value if found else null
+   */
+  byte[] getDictionaryValueForKeyInBytes(int surrogateKey);
+
+  /**
    * This method will find and return the sort index for a given dictionary id.
    * Applicable scenarios:
    * 1. Used in case of order by queries when data sorting is required
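
A minimal caller-side sketch of the new byte-based lookup; here dict and surrogateKey are assumed inputs, not part of this patch. It shows why the method exists: the bytes-to-String conversion that getDictionaryValueForKey performs on every lookup can be deferred until a String is actually needed:

    // Sketch only: 'dict' is an assumed Dictionary instance, 'surrogateKey' an assumed input.
    byte[] valueInBytes = dict.getDictionaryValueForKeyInBytes(surrogateKey);
    if (valueInBytes != null) {
      // Convert only at the point where a String is really required,
      // e.g. when writing the final query result.
      String value = new String(valueInBytes, CarbonCommonConstants.DEFAULT_CHARSET_CLASS);
    }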

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionary.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionary.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionary.java
index 92fe522..abc95e8 100644
--- a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionary.java
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionary.java
@@ -82,6 +82,19 @@ public class ForwardDictionary implements Dictionary {
   }
 
   /**
+   * This method will find and return the dictionary value for a given surrogate key in bytes.
+   * Applicable scenarios:
+   * 1. Query final result preparation: while converting surrogate keys in the final
+   * result back to the original dictionary values, this method will be used
+   *
+   * @param surrogateKey a unique ID for a dictionary value
+   * @return value if found else null
+   */
+  @Override public byte[] getDictionaryValueForKeyInBytes(int surrogateKey) {
+    return columnDictionaryInfo.getDictionaryValueForKeyInBytes(surrogateKey);
+  }
+
+  /**
    * This method will find and return the sort index for a given dictionary id.
    * Applicable scenarios:
    * 1. Used in case of order by queries when data sorting is required

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionary.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionary.java b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionary.java
index 97736ba..ff0e687 100644
--- a/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionary.java
+++ b/core/src/main/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionary.java
@@ -76,6 +76,19 @@ public class ReverseDictionary implements Dictionary {
   }
 
   /**
+   * This method will find and return the dictionary value for a given surrogate key in bytes.
+   * Applicable scenarios:
+   * 1. Query final result preparation: while converting surrogate keys in the final
+   * result back to the original dictionary values, this method will be used
+   *
+   * @param surrogateKey a unique ID for a dictionary value
+   * @return value if found else null
+   */
+  @Override public byte[] getDictionaryValueForKeyInBytes(int surrogateKey) {
+    return columnReverseDictionaryInfo.getDictionaryValueForKeyInBytes(surrogateKey);
+  }
+
+  /**
    * This method will find and return the sort index for a given dictionary id.
    * Applicable scenarios:
    * 1. Used in case of order by queries when data sorting is required

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index d4347f1..1142c4e 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -17,6 +17,8 @@
 
 package org.apache.carbondata.core.constants;
 
+import java.nio.charset.Charset;
+
 public final class CarbonCommonConstants {
   /**
    * integer size in bytes
@@ -586,10 +588,11 @@ public final class CarbonCommonConstants {
    */
   public static final String TABLEUPDATESTATUS_FILENAME = "tableupdatestatus";
   /**
-   * INMEMORY_REOCRD_SIZE
+   * The batch size of records returned to the client.
    */
   public static final String DETAIL_QUERY_BATCH_SIZE = "carbon.detail.batch.size";
-  public static final int DETAIL_QUERY_BATCH_SIZE_DEFAULT = 10000;
+
+  public static final int DETAIL_QUERY_BATCH_SIZE_DEFAULT = 100;
   /**
    * SPILL_OVER_DISK_PATH
    */
@@ -711,6 +714,11 @@ public final class CarbonCommonConstants {
   public static final String DEFAULT_CHARSET = "UTF-8";
 
   /**
+   * default charset class to be used for reading and writing
+   */
+  public static final Charset DEFAULT_CHARSET_CLASS = Charset.forName(DEFAULT_CHARSET);
+
+  /**
    * surrogate key that will be sent whenever in the dictionary chunks
    * a valid surrogate key is not found for a given dictionary value
    */
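
Note that DETAIL_QUERY_BATCH_SIZE_DEFAULT drops from 10000 to 100 rows in this change. A sketch of pinning the old batch size explicitly, assuming the usual CarbonProperties override mechanism applies to this property (hedged, not verified against this patch):

    // Sketch only: restore the previous 10000-row batches for detail queries.
    CarbonProperties.getInstance()
        .addProperty(CarbonCommonConstants.DETAIL_QUERY_BATCH_SIZE, "10000");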

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/DataRefNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/DataRefNode.java b/core/src/main/java/org/apache/carbondata/core/datastore/DataRefNode.java
index 6dcc2b8..456710a 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/DataRefNode.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/DataRefNode.java
@@ -19,8 +19,8 @@ package org.apache.carbondata.core.datastore;
 import java.io.IOException;
 
 import org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache;
-import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
-import org.apache.carbondata.core.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
+import org.apache.carbondata.core.datastore.chunk.impl.MeasureRawColumnChunk;
 
 /**
  * Interface data block reference
@@ -77,7 +77,7 @@ public interface DataRefNode {
    *                     data in one IO
    * @return dimension data chunks
    */
-  DimensionColumnDataChunk[] getDimensionChunks(FileHolder fileReader, int[][] blockIndexes)
+  DimensionRawColumnChunk[] getDimensionChunks(FileHolder fileReader, int[][] blockIndexes)
       throws IOException;
 
   /**
@@ -86,7 +86,7 @@ public interface DataRefNode {
    * @param fileReader file reader to read the chunk from file
    * @return dimension data chunk
    */
-  DimensionColumnDataChunk getDimensionChunk(FileHolder fileReader, int blockIndexes)
+  DimensionRawColumnChunk getDimensionChunk(FileHolder fileReader, int blockIndexes)
       throws IOException;
 
   /**
@@ -101,7 +101,7 @@ public interface DataRefNode {
    *                     data in one IO
    * @return measure column data chunk
    */
-  MeasureColumnDataChunk[] getMeasureChunks(FileHolder fileReader, int[][] blockIndexes)
+  MeasureRawColumnChunk[] getMeasureChunks(FileHolder fileReader, int[][] blockIndexes)
       throws IOException;
 
   /**
@@ -111,7 +111,7 @@ public interface DataRefNode {
    * @param blockIndex block index to be read from file
    * @return measure data chunk
    */
-  MeasureColumnDataChunk getMeasureChunk(FileHolder fileReader, int blockIndex) throws IOException;
+  MeasureRawColumnChunk getMeasureChunk(FileHolder fileReader, int blockIndex) throws IOException;
 
   /**
    * @param deleteDeltaDataCache

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/FileHolder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/FileHolder.java b/core/src/main/java/org/apache/carbondata/core/datastore/FileHolder.java
index 12e525e..b1eb1ee 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/FileHolder.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/FileHolder.java
@@ -18,8 +18,22 @@
 package org.apache.carbondata.core.datastore;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 
 public interface FileHolder {
+
+  /**
+   * This method will be used to read data from the file into the given ByteBuffer,
+   * based on the offset and the length (number of bytes) to be read
+   *
+   * @param filePath fully qualified file path
+   * @param byteBuffer buffer into which the data will be read
+   * @param offset reading start position
+   * @param length number of bytes to be read
+   * @throws IOException
+   */
+  void readByteBuffer(String filePath, ByteBuffer byteBuffer, long offset, int length)
+      throws IOException;
   /**
    * This method will be used to read the byte array from file based on offset
    * and length(number of bytes) need to read
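
A minimal sketch of how an implementation could satisfy the new readByteBuffer contract with java.nio, assuming a plain local-file reader; the patch's actual DFSFileHolderImpl/FileHolderImpl changes may differ:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Paths;
    import java.nio.file.StandardOpenOption;

    // Hypothetical implementation sketch, not the patch's own code.
    public class LocalFileHolderSketch {
      public void readByteBuffer(String filePath, ByteBuffer byteBuffer, long offset, int length)
          throws IOException {
        try (FileChannel channel = FileChannel.open(Paths.get(filePath), StandardOpenOption.READ)) {
          channel.position(offset);
          byteBuffer.limit(length);
          // keep reading until 'length' bytes are in the buffer or EOF is hit
          while (byteBuffer.hasRemaining() && channel.read(byteBuffer) != -1) {
            // each read advances the buffer position
          }
          byteBuffer.flip(); // make the bytes readable for the caller
        }
      }
    }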

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/AbstractRawColumnChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/AbstractRawColumnChunk.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/AbstractRawColumnChunk.java
new file mode 100644
index 0000000..d04077c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/AbstractRawColumnChunk.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.core.datastore.chunk;
+
+import java.nio.ByteBuffer;
+
+
+/**
+ * It contains a group of uncompressed blocklets of one column.
+ */
+public abstract class AbstractRawColumnChunk {
+
+  private byte[][] minValues;
+
+  private byte[][] maxValues;
+
+  protected ByteBuffer rawData;
+
+  private int[] lengths;
+
+  private int[] offsets;
+
+  private int[] rowCount;
+
+  protected int pagesCount;
+
+  protected int blockletId;
+
+  protected int offSet;
+
+  protected int length;
+
+  public AbstractRawColumnChunk(int blockletId, ByteBuffer rawData, int offSet, int length) {
+    this.blockletId = blockletId;
+    this.rawData = rawData;
+    this.offSet = offSet;
+    this.length = length;
+  }
+
+  public byte[][] getMinValues() {
+    return minValues;
+  }
+
+  public void setMinValues(byte[][] minValues) {
+    this.minValues = minValues;
+  }
+
+  public byte[][] getMaxValues() {
+    return maxValues;
+  }
+
+  public void setMaxValues(byte[][] maxValues) {
+    this.maxValues = maxValues;
+  }
+
+  public ByteBuffer getRawData() {
+    return rawData;
+  }
+
+  public void setRawData(ByteBuffer rawData) {
+    this.rawData = rawData;
+  }
+
+  public int[] getLengths() {
+    return lengths;
+  }
+
+  public void setLengths(int[] lengths) {
+    this.lengths = lengths;
+  }
+
+  public int[] getOffsets() {
+    return offsets;
+  }
+
+  public void setOffsets(int[] offsets) {
+    this.offsets = offsets;
+  }
+
+  public int getPagesCount() {
+    return pagesCount;
+  }
+
+  public void setPagesCount(int pagesCount) {
+    this.pagesCount = pagesCount;
+  }
+
+  public int[] getRowCount() {
+    return rowCount;
+  }
+
+  public void setRowCount(int[] rowCount) {
+    this.rowCount = rowCount;
+  }
+
+  public abstract void freeMemory();
+
+  public int getBlockletId() {
+    return blockletId;
+  }
+
+  public int getOffSet() {
+    return offSet;
+  }
+
+  public int getLength() {
+    return length;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/DimensionRawColumnChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/DimensionRawColumnChunk.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/DimensionRawColumnChunk.java
new file mode 100644
index 0000000..048a703
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/DimensionRawColumnChunk.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.core.datastore.chunk.impl;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.core.datastore.FileHolder;
+import org.apache.carbondata.core.datastore.chunk.AbstractRawColumnChunk;
+import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.datastore.chunk.reader.DimensionColumnChunkReader;
+
+/**
+ * Contains raw dimension data:
+ * 1. The uncompressed raw data of the column chunk, with all pages, is stored in this instance.
+ * 2. The raw data can be converted to a processed chunk using the convertToDimColDataChunk
+ *    method by specifying the page number.
+ */
+public class DimensionRawColumnChunk extends AbstractRawColumnChunk {
+
+  private DimensionColumnDataChunk[] dataChunks;
+
+  private DimensionColumnChunkReader chunkReader;
+
+  private FileHolder fileHolder;
+
+  public DimensionRawColumnChunk(int blockletId, ByteBuffer rawData, int offSet, int length,
+      DimensionColumnChunkReader columnChunkReader) {
+    super(blockletId, rawData, offSet, length);
+    this.chunkReader = columnChunkReader;
+  }
+
+  /**
+   * Convert the raw data of all pages to processed DimensionColumnDataChunks
+   * @return processed data chunks, one per page
+   */
+  public DimensionColumnDataChunk[] convertToDimColDataChunks() {
+    if (dataChunks == null) {
+      dataChunks = new DimensionColumnDataChunk[pagesCount];
+    }
+    for (int i = 0; i < pagesCount; i++) {
+      try {
+        if (dataChunks[i] == null) {
+          dataChunks[i] = chunkReader.convertToDimensionChunk(this, i);
+        }
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    }
+    return dataChunks;
+  }
+
+  /**
+   * Convert the raw data of the specified page to a processed DimensionColumnDataChunk
+   * @param index page number to be converted
+   * @return processed data chunk for the given page
+   */
+  public DimensionColumnDataChunk convertToDimColDataChunk(int index) {
+    assert index < pagesCount;
+    if (dataChunks == null) {
+      dataChunks = new DimensionColumnDataChunk[pagesCount];
+    }
+    if (dataChunks[index] == null) {
+      try {
+        dataChunks[index] = chunkReader.convertToDimensionChunk(this, index);
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+    }
+
+    return dataChunks[index];
+  }
+
+  @Override public void freeMemory() {
+    if (null != dataChunks) {
+      for (int i = 0; i < dataChunks.length; i++) {
+        if (dataChunks[i] != null) {
+          dataChunks[i].freeMemory();
+        }
+      }
+    }
+  }
+
+  public void setFileHolder(FileHolder fileHolder) {
+    this.fileHolder = fileHolder;
+  }
+
+  public FileHolder getFileReader() {
+    return fileHolder;
+  }
+}
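
A sketch of the intended two-phase read flow these raw chunks enable (hedged: chunkReader, fileReader and blockletIndex are assumed inputs): the IO happens once up front, and decoding is deferred to the page level so the scanner can skip pages that filters prune:

    // Sketch only: read raw bytes in one IO, decode page by page on demand.
    DimensionRawColumnChunk rawChunk =
        chunkReader.readRawDimensionChunk(fileReader, blockletIndex);
    for (int page = 0; page < rawChunk.getPagesCount(); page++) {
      // decode lazily, only for the pages the scanner actually needs
      DimensionColumnDataChunk decoded = rawChunk.convertToDimColDataChunk(page);
      // ... scan or filter using 'decoded' ...
    }
    rawChunk.freeMemory(); // release the decoded pages when done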

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/MeasureRawColumnChunk.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/MeasureRawColumnChunk.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/MeasureRawColumnChunk.java
new file mode 100644
index 0000000..4702abd
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/MeasureRawColumnChunk.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.core.datastore.chunk.impl;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.core.datastore.FileHolder;
+import org.apache.carbondata.core.datastore.chunk.AbstractRawColumnChunk;
+import org.apache.carbondata.core.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.datastore.chunk.reader.MeasureColumnChunkReader;
+
+/**
+ * Contains raw measure data:
+ * 1. The uncompressed raw data of the column chunk, with all pages, is stored in this instance.
+ * 2. The raw data can be converted to a processed chunk using the convertToMeasureColDataChunk
+ *    method by specifying the page number.
+ */
+public class MeasureRawColumnChunk extends AbstractRawColumnChunk {
+
+  private MeasureColumnDataChunk[] dataChunks;
+
+  private MeasureColumnChunkReader chunkReader;
+
+  private FileHolder fileReader;
+
+  public MeasureRawColumnChunk(int blockId, ByteBuffer rawData, int offSet, int length,
+      MeasureColumnChunkReader chunkReader) {
+    super(blockId, rawData, offSet, length);
+    this.chunkReader = chunkReader;
+  }
+
+  /**
+   * Convert the raw data of all pages to processed MeasureColumnDataChunks
+   * @return processed data chunks, one per page
+   */
+  public MeasureColumnDataChunk[] convertToMeasureColDataChunks() {
+    if (dataChunks == null) {
+      dataChunks = new MeasureColumnDataChunk[pagesCount];
+    }
+    for (int i = 0; i < pagesCount; i++) {
+      try {
+        if (dataChunks[i] == null) {
+          dataChunks[i] = chunkReader.convertToMeasureChunk(this, i);
+        }
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    }
+
+    return dataChunks;
+  }
+
+  /**
+   * Convert the raw data of the specified page to a processed MeasureColumnDataChunk
+   * @param index page number to be converted
+   * @return processed data chunk for the given page
+   */
+  public MeasureColumnDataChunk convertToMeasureColDataChunk(int index) {
+    assert index < pagesCount;
+    if (dataChunks == null) {
+      dataChunks = new MeasureColumnDataChunk[pagesCount];
+    }
+
+    try {
+      if (dataChunks[index] == null) {
+        dataChunks[index] = chunkReader.convertToMeasureChunk(this, index);
+      }
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+
+    return dataChunks[index];
+  }
+
+  @Override public void freeMemory() {
+    if (null != dataChunks) {
+      for (int i = 0; i < dataChunks.length; i++) {
+        if (dataChunks[i] != null) {
+          dataChunks[i].freeMemory();
+        }
+      }
+    }
+  }
+
+  public void setFileReader(FileHolder fileReader) {
+    this.fileReader = fileReader;
+  }
+
+  public FileHolder getFileReader() {
+    return fileReader;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/DimensionColumnChunkReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/DimensionColumnChunkReader.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/DimensionColumnChunkReader.java
index a542b25..7110bfa 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/DimensionColumnChunkReader.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/DimensionColumnChunkReader.java
@@ -20,6 +20,7 @@ import java.io.IOException;
 
 import org.apache.carbondata.core.datastore.FileHolder;
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
 
 /**
  * Interface for reading the data chunk
@@ -32,19 +33,30 @@ public interface DimensionColumnChunkReader {
    * Below method will be used to read the chunk based on block indexes
    *
    * @param fileReader   file reader to read the blocks from file
-   * @param blockIndexes blocks to be read
+   * @param blockletIndexes blocklets to be read
    * @return dimension column chunks
    */
-  DimensionColumnDataChunk[] readDimensionChunks(FileHolder fileReader, int[][] blockIndexes)
+  DimensionRawColumnChunk[] readRawDimensionChunks(FileHolder fileReader, int[][] blockletIndexes)
       throws IOException;
 
   /**
    * Below method will be used to read the chunk based on block index
    *
    * @param fileReader file reader to read the blocks from file
-   * @param blockIndex block to be read
+   * @param blockletIndex blocklet to be read
    * @return dimension column chunk
    */
-  DimensionColumnDataChunk readDimensionChunk(FileHolder fileReader, int blockIndex)
+  DimensionRawColumnChunk readRawDimensionChunk(FileHolder fileReader, int blockletIndex)
       throws IOException;
+
+  /**
+   * Converts the raw data chunk to a processed chunk for the given page number
+   *
+   * @param dimensionRawColumnChunk raw data chunk to be converted
+   * @param pageNumber page number to be processed
+   * @return processed dimension column data chunk
+   * @throws IOException
+   */
+  DimensionColumnDataChunk convertToDimensionChunk(DimensionRawColumnChunk dimensionRawColumnChunk,
+      int pageNumber) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/MeasureColumnChunkReader.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/MeasureColumnChunkReader.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/MeasureColumnChunkReader.java
index a3dbcc0..ef7875b 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/MeasureColumnChunkReader.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/MeasureColumnChunkReader.java
@@ -20,6 +20,7 @@ import java.io.IOException;
 
 import org.apache.carbondata.core.datastore.FileHolder;
 import org.apache.carbondata.core.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.datastore.chunk.impl.MeasureRawColumnChunk;
 
 /**
  * Reader interface for reading the measure blocks from file
@@ -33,7 +34,7 @@ public interface MeasureColumnChunkReader {
    * @param blockIndexes blocks to be read
    * @return measure data chunks
    */
-  MeasureColumnDataChunk[] readMeasureChunks(FileHolder fileReader, int[][] blockIndexes)
+  MeasureRawColumnChunk[] readRawMeasureChunks(FileHolder fileReader, int[][] blockIndexes)
       throws IOException;
 
   /**
@@ -43,6 +44,21 @@ public interface MeasureColumnChunkReader {
    * @param blockIndex block to be read
    * @return measure data chunk
    */
-  MeasureColumnDataChunk readMeasureChunk(FileHolder fileReader, int blockIndex) throws IOException;
+  MeasureRawColumnChunk readRawMeasureChunk(FileHolder fileReader, int blockIndex)
+      throws IOException;
+
+  /**
+   * Convert raw data to a processed measure chunk
+   *
+   * @param measureRawColumnChunk raw data chunk to be converted; it carries the
+   *                              file reader, raw data buffer, offset and length
+   *                              used while reading it
+   * @param pageNumber page number to be processed
+   * @return processed measure column data chunk
+   * @throws IOException
+   */
+  MeasureColumnDataChunk convertToMeasureChunk(MeasureRawColumnChunk measureRawColumnChunk,
+      int pageNumber) throws IOException;
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v1/CompressedDimensionChunkFileBasedReaderV1.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v1/CompressedDimensionChunkFileBasedReaderV1.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v1/CompressedDimensionChunkFileBasedReaderV1.java
index 1130e5c..00e6351 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v1/CompressedDimensionChunkFileBasedReaderV1.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v1/CompressedDimensionChunkFileBasedReaderV1.java
@@ -17,11 +17,13 @@
 package org.apache.carbondata.core.datastore.chunk.reader.dimension.v1;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.List;
 
 import org.apache.carbondata.core.datastore.FileHolder;
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.ColumnGroupDimensionDataChunk;
+import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.FixedLengthDimensionDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.VariableLengthDimensionDataChunk;
 import org.apache.carbondata.core.datastore.chunk.reader.dimension.AbstractChunkReader;
@@ -56,70 +58,99 @@ public class CompressedDimensionChunkFileBasedReaderV1 extends AbstractChunkRead
   }
 
   /**
-   * Below method will be used to read the chunk based on block indexes
+   * Below method will be used to read the raw chunk based on block indexes
    *
    * @param fileReader   file reader to read the blocks from file
-   * @param blockIndexes blocks to be read
+   * @param blockletIndexes blocklets to be read
    * @return dimension column chunks
    */
-  @Override public DimensionColumnDataChunk[] readDimensionChunks(FileHolder fileReader,
-      int[][] blockIndexes) throws IOException {
-    // read the column chunk based on block index and add
-    DimensionColumnDataChunk[] dataChunks =
-        new DimensionColumnDataChunk[dimensionColumnChunk.size()];
-    for (int i = 0; i < blockIndexes.length; i++) {
-      for (int j = blockIndexes[i][0]; j <= blockIndexes[i][1]; j++) {
-        dataChunks[j] = readDimensionChunk(fileReader, j);
+  @Override public DimensionRawColumnChunk[] readRawDimensionChunks(FileHolder fileReader,
+      int[][] blockletIndexes) throws IOException {
+    DimensionRawColumnChunk[] dataChunks = new DimensionRawColumnChunk[dimensionColumnChunk.size()];
+    for (int i = 0; i < blockletIndexes.length; i++) {
+      for (int j = blockletIndexes[i][0]; j <= blockletIndexes[i][1]; j++) {
+        dataChunks[j] = readRawDimensionChunk(fileReader, j);
       }
     }
     return dataChunks;
   }
 
   /**
-   * Below method will be used to read the chunk based on block index
+   * Below method will be used to read the raw chunk based on block index
    *
    * @param fileReader file reader to read the blocks from file
-   * @param blockIndex block to be read
+   * @param blockletIndex blocklet to be read
    * @return dimension column chunk
    */
-  @Override public DimensionColumnDataChunk readDimensionChunk(FileHolder fileReader,
-      int blockIndex) throws IOException {
+  @Override public DimensionRawColumnChunk readRawDimensionChunk(FileHolder fileReader,
+      int blockletIndex) throws IOException {
+    DataChunk dataChunk = dimensionColumnChunk.get(blockletIndex);
+    ByteBuffer buffer =
+        ByteBuffer.allocateDirect(dataChunk.getDataPageLength());
+    synchronized (fileReader) {
+      fileReader.readByteBuffer(filePath, buffer,
+          dataChunk.getDataPageOffset(),
+          dataChunk.getDataPageLength());
+    }
+    DimensionRawColumnChunk rawColumnChunk = new DimensionRawColumnChunk(blockletIndex, buffer, 0,
+        dataChunk.getDataPageLength(), this);
+    rawColumnChunk.setFileHolder(fileReader);
+    rawColumnChunk.setPagesCount(1);
+    rawColumnChunk.setRowCount(new int[] { numberOfRows });
+    return rawColumnChunk;
+  }
+
+  @Override public DimensionColumnDataChunk convertToDimensionChunk(
+      DimensionRawColumnChunk dimensionRawColumnChunk, int pageNumber) throws IOException {
+    int blockIndex = dimensionRawColumnChunk.getBlockletId();
     byte[] dataPage = null;
     int[] invertedIndexes = null;
     int[] invertedIndexesReverse = null;
     int[] rlePage = null;
+    FileHolder fileReader = dimensionRawColumnChunk.getFileReader();
+
+    ByteBuffer rawData = dimensionRawColumnChunk.getRawData();
+    rawData.position(dimensionRawColumnChunk.getOffSet());
+    byte[] data = new byte[dimensionRawColumnChunk.getLength()];
+    rawData.get(data);
+    dataPage = COMPRESSOR.unCompressByte(data);
 
-    // first read the data and uncompressed it
-    dataPage = COMPRESSOR.unCompressByte(fileReader
-        .readByteArray(filePath, dimensionColumnChunk.get(blockIndex).getDataPageOffset(),
-            dimensionColumnChunk.get(blockIndex).getDataPageLength()));
     // if row id block is present then read the row id chunk and uncompress it
-    if (CarbonUtil.hasEncoding(dimensionColumnChunk.get(blockIndex).getEncodingList(),
+    DataChunk dataChunk = dimensionColumnChunk.get(blockIndex);
+    if (CarbonUtil.hasEncoding(dataChunk.getEncodingList(),
         Encoding.INVERTED_INDEX)) {
+      byte[] columnIndexData;
+      synchronized (fileReader) {
+        columnIndexData = fileReader
+            .readByteArray(filePath, dataChunk.getRowIdPageOffset(),
+                dataChunk.getRowIdPageLength());
+      }
       invertedIndexes = CarbonUtil
-          .getUnCompressColumnIndex(dimensionColumnChunk.get(blockIndex).getRowIdPageLength(),
-              fileReader.readByteArray(filePath,
-                  dimensionColumnChunk.get(blockIndex).getRowIdPageOffset(),
-                  dimensionColumnChunk.get(blockIndex).getRowIdPageLength()), numberComressor, 0);
+          .getUnCompressColumnIndex(dataChunk.getRowIdPageLength(),
+              columnIndexData, numberComressor, 0);
       // get the reverse index
       invertedIndexesReverse = getInvertedReverseIndex(invertedIndexes);
     }
     // if rle is applied then read the rle block chunk and then uncompress
     //then actual data based on rle block
     if (CarbonUtil
-        .hasEncoding(dimensionColumnChunk.get(blockIndex).getEncodingList(), Encoding.RLE)) {
+        .hasEncoding(dataChunk.getEncodingList(), Encoding.RLE)) {
       // read and uncompress the rle block
-      rlePage = numberComressor.unCompress(fileReader
-              .readByteArray(filePath, dimensionColumnChunk.get(blockIndex).getRlePageOffset(),
-                  dimensionColumnChunk.get(blockIndex).getRlePageLength()), 0,
-          dimensionColumnChunk.get(blockIndex).getRlePageLength());
+      byte[] key;
+      synchronized (fileReader) {
+        key = fileReader
+            .readByteArray(filePath, dataChunk.getRlePageOffset(),
+                dataChunk.getRlePageLength());
+      }
+      rlePage = numberComressor
+          .unCompress(key, 0, dataChunk.getRlePageLength());
       // uncompress the data with rle indexes
       dataPage = UnBlockIndexer.uncompressData(dataPage, rlePage, eachColumnValueSize[blockIndex]);
       rlePage = null;
     }
     // fill chunk attributes
     DimensionColumnDataChunk columnDataChunk = null;
-    if (dimensionColumnChunk.get(blockIndex).isRowMajor()) {
+    if (dataChunk.isRowMajor()) {
       // to store fixed length column chunk values
       columnDataChunk = new ColumnGroupDimensionDataChunk(dataPage, eachColumnValueSize[blockIndex],
           numberOfRows);
@@ -127,7 +158,7 @@ public class CompressedDimensionChunkFileBasedReaderV1 extends AbstractChunkRead
     // if no dictionary column then first create a no dictionary column chunk
     // and set to data chunk instance
     else if (!CarbonUtil
-        .hasEncoding(dimensionColumnChunk.get(blockIndex).getEncodingList(), Encoding.DICTIONARY)) {
+        .hasEncoding(dataChunk.getEncodingList(), Encoding.DICTIONARY)) {
       columnDataChunk =
           new VariableLengthDimensionDataChunk(dataPage, invertedIndexes, invertedIndexesReverse,
               numberOfRows);
@@ -139,5 +170,4 @@ public class CompressedDimensionChunkFileBasedReaderV1 extends AbstractChunkRead
     }
     return columnDataChunk;
   }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v2/CompressedDimensionChunkFileBasedReaderV2.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v2/CompressedDimensionChunkFileBasedReaderV2.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v2/CompressedDimensionChunkFileBasedReaderV2.java
index 3257ed4..9d5849f 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v2/CompressedDimensionChunkFileBasedReaderV2.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v2/CompressedDimensionChunkFileBasedReaderV2.java
@@ -17,11 +17,13 @@
 package org.apache.carbondata.core.datastore.chunk.reader.dimension.v2;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.List;
 
 import org.apache.carbondata.core.datastore.FileHolder;
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.ColumnGroupDimensionDataChunk;
+import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.FixedLengthDimensionDataChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.VariableLengthDimensionDataChunk;
 import org.apache.carbondata.core.datastore.chunk.reader.dimension.AbstractChunkReader;
@@ -44,7 +46,7 @@ public class CompressedDimensionChunkFileBasedReaderV2 extends AbstractChunkRead
   /**
    * dimension chunks length
    */
-  private List<Short> dimensionChunksLength;
+  private List<Integer> dimensionChunksLength;
 
   /**
    * Constructor to get minimum parameter to create instance of this class
@@ -70,41 +72,43 @@ public class CompressedDimensionChunkFileBasedReaderV2 extends AbstractChunkRead
   * For the last column, read it separately and process it
    *
    * @param fileReader   file reader to read the blocks from file
-   * @param blockIndexes blocks range to be read
+   * @param blockletIndexes range of blocklets to be read
    * @return dimension column chunks
    */
-  @Override public DimensionColumnDataChunk[] readDimensionChunks(final FileHolder fileReader,
-      final int[][] blockIndexes) throws IOException {
+  @Override public DimensionRawColumnChunk[] readRawDimensionChunks(final FileHolder fileReader,
+      final int[][] blockletIndexes) throws IOException {
     // read the column chunk based on block index and add
-    DimensionColumnDataChunk[] dataChunks =
-        new DimensionColumnDataChunk[dimensionChunksOffset.size()];
+    DimensionRawColumnChunk[] dataChunks =
+        new DimensionRawColumnChunk[dimensionChunksOffset.size()];
     // if blocklet index is empty then return empty data chunk
-    if (blockIndexes.length == 0) {
+    if (blockletIndexes.length == 0) {
       return dataChunks;
     }
-    DimensionColumnDataChunk[] groupChunk = null;
+    DimensionRawColumnChunk[] groupChunk = null;
     int index = 0;
     // iterate till blockletIndexes.length - 1, as block indexes are in sorted order, to avoid
     // reading the last column in a group
-    for (int i = 0; i < blockIndexes.length - 1; i++) {
+    for (int i = 0; i < blockletIndexes.length - 1; i++) {
       index = 0;
-      groupChunk = readDimensionChunksInGroup(fileReader, blockIndexes[i][0], blockIndexes[i][1]);
-      for (int j = blockIndexes[i][0]; j <= blockIndexes[i][1]; j++) {
+      groupChunk =
+          readRawDimensionChunksInGroup(fileReader, blockletIndexes[i][0], blockletIndexes[i][1]);
+      for (int j = blockletIndexes[i][0]; j <= blockletIndexes[i][1]; j++) {
         dataChunks[j] = groupChunk[index++];
       }
     }
     // check last index is present in block index, if it is present then read separately
-    if (blockIndexes[blockIndexes.length - 1][0] == dimensionChunksOffset.size() - 1) {
-      dataChunks[blockIndexes[blockIndexes.length - 1][0]] =
-          readDimensionChunk(fileReader, blockIndexes[blockIndexes.length - 1][0]);
+    if (blockletIndexes[blockletIndexes.length - 1][0] == dimensionChunksOffset.size() - 1) {
+      dataChunks[blockletIndexes[blockletIndexes.length - 1][0]] =
+          readRawDimensionChunk(fileReader, blockletIndexes[blockletIndexes.length - 1][0]);
     }
     // otherwise read the data in group
     else {
-      groupChunk = readDimensionChunksInGroup(fileReader, blockIndexes[blockIndexes.length - 1][0],
-          blockIndexes[blockIndexes.length - 1][1]);
+      groupChunk =
+          readRawDimensionChunksInGroup(fileReader, blockletIndexes[blockletIndexes.length - 1][0],
+              blockletIndexes[blockletIndexes.length - 1][1]);
       index = 0;
-      for (int j = blockIndexes[blockIndexes.length - 1][0];
-           j <= blockIndexes[blockIndexes.length - 1][1]; j++) {
+      for (int j = blockletIndexes[blockletIndexes.length - 1][0];
+           j <= blockletIndexes[blockletIndexes.length - 1][1]; j++) {
         dataChunks[j] = groupChunk[index++];
       }
     }
@@ -115,48 +119,100 @@ public class CompressedDimensionChunkFileBasedReaderV2 extends AbstractChunkRead
    * Below method will be used to read the chunk based on block index
    *
    * @param fileReader file reader to read the blocks from file
-   * @param blockIndex block to be read
+   * @param blockletIndex block to be read
    * @return dimension column chunk
    */
-  @Override public DimensionColumnDataChunk readDimensionChunk(FileHolder fileReader,
-      int blockIndex) throws IOException {
+  public DimensionRawColumnChunk readRawDimensionChunk(FileHolder fileReader,
+      int blockletIndex) throws IOException {
+    int length = 0;
+    if (dimensionChunksOffset.size() - 1 == blockletIndex) {
+      // In case of the last block, read only the data chunk here; the rest is read while converting it.
+      length = dimensionChunksLength.get(blockletIndex);
+    } else {
+      long currentDimensionOffset = dimensionChunksOffset.get(blockletIndex);
+      length = (int) (dimensionChunksOffset.get(blockletIndex + 1) - currentDimensionOffset);
+    }
+    ByteBuffer buffer = ByteBuffer.allocateDirect(length);
+    synchronized (fileReader) {
+      fileReader.readByteBuffer(filePath, buffer, dimensionChunksOffset.get(blockletIndex), length);
+    }
+    DimensionRawColumnChunk rawColumnChunk =
+        new DimensionRawColumnChunk(blockletIndex, buffer, 0, length, this);
+    rawColumnChunk.setFileHolder(fileReader);
+    rawColumnChunk.setPagesCount(1);
+    rawColumnChunk.setRowCount(new int[]{numberOfRows});
+    return rawColumnChunk;
+  }
+
+  private DimensionRawColumnChunk[] readRawDimensionChunksInGroup(FileHolder fileReader,
+      int startBlockIndex, int endBlockIndex) throws IOException {
+    long currentDimensionOffset = dimensionChunksOffset.get(startBlockIndex);
+    ByteBuffer buffer = ByteBuffer.allocateDirect(
+        (int) (dimensionChunksOffset.get(endBlockIndex + 1) - currentDimensionOffset));
+    synchronized (fileReader) {
+      fileReader.readByteBuffer(filePath, buffer, currentDimensionOffset,
+          (int) (dimensionChunksOffset.get(endBlockIndex + 1) - currentDimensionOffset));
+    }
+    DimensionRawColumnChunk[] dataChunks =
+        new DimensionRawColumnChunk[endBlockIndex - startBlockIndex + 1];
+    int index = 0;
+    int runningLength = 0;
+    for (int i = startBlockIndex; i <= endBlockIndex; i++) {
+      int currentLength = (int) (dimensionChunksOffset.get(i + 1) - dimensionChunksOffset.get(i));
+      dataChunks[index] =
+          new DimensionRawColumnChunk(i, buffer, runningLength, currentLength, this);
+      dataChunks[index].setFileHolder(fileReader);
+      dataChunks[index].setPagesCount(1);
+      dataChunks[index].setRowCount(new int[] { numberOfRows });
+      runningLength += currentLength;
+      index++;
+    }
+    return dataChunks;
+  }
+
+  public DimensionColumnDataChunk convertToDimensionChunk(
+      DimensionRawColumnChunk dimensionRawColumnChunk, int pageNumber) throws IOException {
     byte[] dataPage = null;
     int[] invertedIndexes = null;
     int[] invertedIndexesReverse = null;
     int[] rlePage = null;
     DataChunk2 dimensionColumnChunk = null;
-    byte[] data = null;
-    int copySourcePoint = 0;
-    byte[] dimensionChunk = null;
+    int copySourcePoint = dimensionRawColumnChunk.getOffSet();
+    int blockIndex = dimensionRawColumnChunk.getBlockletId();
+    ByteBuffer rawData = dimensionRawColumnChunk.getRawData();
     if (dimensionChunksOffset.size() - 1 == blockIndex) {
-      dimensionChunk = fileReader.readByteArray(filePath, dimensionChunksOffset.get(blockIndex),
-          dimensionChunksLength.get(blockIndex));
       dimensionColumnChunk = CarbonUtil
-          .readDataChunk(dimensionChunk, copySourcePoint, dimensionChunksLength.get(blockIndex));
+          .readDataChunk(rawData, copySourcePoint, dimensionRawColumnChunk.getLength());
       int totalDimensionDataLength =
           dimensionColumnChunk.data_page_length + dimensionColumnChunk.rle_page_length
               + dimensionColumnChunk.rowid_page_length;
-      data = fileReader.readByteArray(filePath,
-          dimensionChunksOffset.get(blockIndex) + dimensionChunksLength.get(blockIndex),
-          totalDimensionDataLength);
+      synchronized (dimensionRawColumnChunk.getFileReader()) {
+        rawData = ByteBuffer.allocateDirect(totalDimensionDataLength);
+        dimensionRawColumnChunk.getFileReader().readByteBuffer(filePath, rawData,
+            dimensionChunksOffset.get(blockIndex) + dimensionChunksLength.get(blockIndex),
+            totalDimensionDataLength);
+      }
     } else {
-      long currentDimensionOffset = dimensionChunksOffset.get(blockIndex);
-      data = fileReader.readByteArray(filePath, currentDimensionOffset,
-          (int) (dimensionChunksOffset.get(blockIndex + 1) - currentDimensionOffset));
       dimensionColumnChunk =
-          CarbonUtil.readDataChunk(data, copySourcePoint, dimensionChunksLength.get(blockIndex));
+          CarbonUtil.readDataChunk(rawData, copySourcePoint, dimensionChunksLength.get(blockIndex));
       copySourcePoint += dimensionChunksLength.get(blockIndex);
     }
 
+    byte[] data = new byte[dimensionColumnChunk.data_page_length];
+    rawData.position(copySourcePoint);
+    rawData.get(data);
     // first read the data and uncompressed it
     dataPage =
-        COMPRESSOR.unCompressByte(data, copySourcePoint, dimensionColumnChunk.data_page_length);
+        COMPRESSOR.unCompressByte(data, 0, dimensionColumnChunk.data_page_length);
     copySourcePoint += dimensionColumnChunk.data_page_length;
     // if row id block is present then read the row id chunk and uncompress it
     if (hasEncoding(dimensionColumnChunk.encoders, Encoding.INVERTED_INDEX)) {
+      byte[] dataInv = new byte[dimensionColumnChunk.rowid_page_length];
+      rawData.position(copySourcePoint);
+      rawData.get(dataInv);
       invertedIndexes = CarbonUtil
-          .getUnCompressColumnIndex(dimensionColumnChunk.rowid_page_length, data, numberComressor,
-              copySourcePoint);
+          .getUnCompressColumnIndex(dimensionColumnChunk.rowid_page_length, dataInv,
+              numberComressor, 0);
       copySourcePoint += dimensionColumnChunk.rowid_page_length;
       // get the reverse index
       invertedIndexesReverse = getInvertedReverseIndex(invertedIndexes);
@@ -164,11 +220,13 @@ public class CompressedDimensionChunkFileBasedReaderV2 extends AbstractChunkRead
     // if rle is applied then read the rle block chunk and then uncompress
     //then actual data based on rle block
     if (hasEncoding(dimensionColumnChunk.encoders, Encoding.RLE)) {
+      byte[] dataRle = new byte[dimensionColumnChunk.rle_page_length];
+      rawData.position(copySourcePoint);
+      rawData.get(dataRle);
       rlePage =
-          numberComressor.unCompress(data, copySourcePoint, dimensionColumnChunk.rle_page_length);
+          numberComressor.unCompress(dataRle, 0, dimensionColumnChunk.rle_page_length);
       // uncompress the data with rle indexes
       dataPage = UnBlockIndexer.uncompressData(dataPage, rlePage, eachColumnValueSize[blockIndex]);
-      rlePage = null;
     }
     // fill chunk attributes
     DimensionColumnDataChunk columnDataChunk = null;
@@ -194,85 +252,6 @@ public class CompressedDimensionChunkFileBasedReaderV2 extends AbstractChunkRead
   }
 
   /**
-   * Below method will be used to read the dimension chunks in group.
-   * This is to enhance the IO performance. Will read the data from start index
-   * to end index(including)
-   *
-   * @param fileReader      stream used for reading
-   * @param startBlockIndex start block index
-   * @param endBlockIndex   end block index
-   * @return dimension column chunk array
-   */
-  private DimensionColumnDataChunk[] readDimensionChunksInGroup(FileHolder fileReader,
-      int startBlockIndex, int endBlockIndex) throws IOException {
-    long currentDimensionOffset = dimensionChunksOffset.get(startBlockIndex);
-    byte[] data = fileReader.readByteArray(filePath, currentDimensionOffset,
-        (int) (dimensionChunksOffset.get(endBlockIndex + 1) - currentDimensionOffset));
-    int copySourcePoint = 0;
-    // read the column chunk based on block index and add
-    DimensionColumnDataChunk[] dataChunks =
-        new DimensionColumnDataChunk[endBlockIndex - startBlockIndex + 1];
-    byte[] dataPage = null;
-    int[] invertedIndexes = null;
-    int[] invertedIndexesReverse = null;
-    int[] rlePage = null;
-    DataChunk2 dimensionColumnChunk = null;
-    int index = 0;
-    for (int i = startBlockIndex; i <= endBlockIndex; i++) {
-      invertedIndexes = null;
-      invertedIndexesReverse = null;
-      dimensionColumnChunk =
-          CarbonUtil.readDataChunk(data, copySourcePoint, dimensionChunksLength.get(i));
-      copySourcePoint += dimensionChunksLength.get(i);
-      // first read the data and uncompressed it
-      dataPage =
-          COMPRESSOR.unCompressByte(data, copySourcePoint, dimensionColumnChunk.data_page_length);
-      copySourcePoint += dimensionColumnChunk.data_page_length;
-      // if row id block is present then read the row id chunk and uncompress it
-      if (hasEncoding(dimensionColumnChunk.encoders, Encoding.INVERTED_INDEX)) {
-        invertedIndexes = CarbonUtil
-            .getUnCompressColumnIndex(dimensionColumnChunk.rowid_page_length, data, numberComressor,
-                copySourcePoint);
-        copySourcePoint += dimensionColumnChunk.rowid_page_length;
-        // get the reverse index
-        invertedIndexesReverse = getInvertedReverseIndex(invertedIndexes);
-      }
-      // if rle is applied then read the rle block chunk and then uncompress
-      //then actual data based on rle block
-      if (hasEncoding(dimensionColumnChunk.encoders, Encoding.RLE)) {
-        // read and uncompress the rle block
-        rlePage =
-            numberComressor.unCompress(data, copySourcePoint, dimensionColumnChunk.rle_page_length);
-        copySourcePoint += dimensionColumnChunk.rle_page_length;
-        // uncompress the data with rle indexes
-        dataPage = UnBlockIndexer.uncompressData(dataPage, rlePage, eachColumnValueSize[i]);
-        rlePage = null;
-      }
-      // fill chunk attributes
-      DimensionColumnDataChunk columnDataChunk = null;
-      if (dimensionColumnChunk.isRowMajor()) {
-        // to store fixed length column chunk values
-        columnDataChunk =
-            new ColumnGroupDimensionDataChunk(dataPage, eachColumnValueSize[i], numberOfRows);
-      }
-      // if no dictionary column then first create a no dictionary column chunk
-      // and set to data chunk instance
-      else if (!hasEncoding(dimensionColumnChunk.encoders, Encoding.DICTIONARY)) {
-        columnDataChunk =
-            new VariableLengthDimensionDataChunk(dataPage, invertedIndexes, invertedIndexesReverse,
-                numberOfRows);
-      } else {
-        // to store fixed length column chunk values
-        columnDataChunk =
-            new FixedLengthDimensionDataChunk(dataPage, invertedIndexes, invertedIndexesReverse,
-                numberOfRows, eachColumnValueSize[i]);
-      }
-      dataChunks[index++] = columnDataChunk;
-    }
-    return dataChunks;
-  }
-
-  /**
    * Below method will be used to check whether particular encoding is present
    * in the dimension or not
    *

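The recurring pattern in the V2 dimension reader above is copy-then-decode: each encoded section (data page, rowid page, RLE page) is copied out of the shared raw ByteBuffer into its own array and handed to a decoder that expects offset 0. A hedged sketch of that pattern with illustrative names (not CarbonData APIs):

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    public final class SectionCopySketch {
      // Hedged sketch: copy one encoded section out of the shared raw buffer
      // so the decoder can work from offset 0.
      static byte[] copySection(ByteBuffer rawData, int offset, int length) {
        byte[] section = new byte[length];
        rawData.position(offset);  // seek to the section inside the shared buffer
        rawData.get(section);      // bulk copy 'length' bytes
        return section;
      }

      public static void main(String[] args) {
        ByteBuffer raw = ByteBuffer.wrap(new byte[] { 9, 9, 1, 2, 3, 9 });
        System.out.println(Arrays.toString(copySection(raw, 2, 3))); // [1, 2, 3]
      }
    }
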
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v1/CompressedMeasureChunkFileBasedReaderV1.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v1/CompressedMeasureChunkFileBasedReaderV1.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v1/CompressedMeasureChunkFileBasedReaderV1.java
index 750da37..107c430 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v1/CompressedMeasureChunkFileBasedReaderV1.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v1/CompressedMeasureChunkFileBasedReaderV1.java
@@ -17,10 +17,12 @@
 package org.apache.carbondata.core.datastore.chunk.reader.measure.v1;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.List;
 
 import org.apache.carbondata.core.datastore.FileHolder;
 import org.apache.carbondata.core.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.datastore.chunk.impl.MeasureRawColumnChunk;
 import org.apache.carbondata.core.datastore.chunk.reader.measure.AbstractMeasureChunkReader;
 import org.apache.carbondata.core.datastore.compression.ReaderCompressModel;
 import org.apache.carbondata.core.datastore.compression.ValueCompressionHolder;
@@ -59,12 +61,12 @@ public class CompressedMeasureChunkFileBasedReaderV1 extends AbstractMeasureChun
    * @param blockIndexes blocks to be read
    * @return measure data chunks
    */
-  @Override public MeasureColumnDataChunk[] readMeasureChunks(final FileHolder fileReader,
-      final int[][] blockIndexes) throws IOException {
-    MeasureColumnDataChunk[] datChunk = new MeasureColumnDataChunk[measureColumnChunks.size()];
+  @Override public MeasureRawColumnChunk[] readRawMeasureChunks(FileHolder fileReader,
+      int[][] blockIndexes) throws IOException {
+    MeasureRawColumnChunk[] datChunk = new MeasureRawColumnChunk[measureColumnChunks.size()];
     for (int i = 0; i < blockIndexes.length; i++) {
       for (int j = blockIndexes[i][0]; j <= blockIndexes[i][1]; j++) {
-        datChunk[j] = readMeasureChunk(fileReader, j);
+        datChunk[j] = readRawMeasureChunk(fileReader, j);
       }
     }
     return datChunk;
@@ -77,20 +79,40 @@ public class CompressedMeasureChunkFileBasedReaderV1 extends AbstractMeasureChun
    * @param blockIndex block to be read
    * @return measure data chunk
    */
-  @Override public MeasureColumnDataChunk readMeasureChunk(final FileHolder fileReader,
-      final int blockIndex) throws IOException {
-    ValueEncoderMeta meta = measureColumnChunks.get(blockIndex).getValueEncoderMeta().get(0);
+  @Override public MeasureRawColumnChunk readRawMeasureChunk(FileHolder fileReader, int blockIndex)
+      throws IOException {
+    DataChunk dataChunk = measureColumnChunks.get(blockIndex);
+    ByteBuffer buffer =
+        ByteBuffer.allocateDirect(dataChunk.getDataPageLength());
+    fileReader
+        .readByteBuffer(filePath, buffer, dataChunk.getDataPageOffset(),
+            dataChunk.getDataPageLength());
+    MeasureRawColumnChunk rawColumnChunk = new MeasureRawColumnChunk(blockIndex, buffer, 0,
+        dataChunk.getDataPageLength(), this);
+    rawColumnChunk.setFileReader(fileReader);
+    rawColumnChunk.setPagesCount(1);
+    rawColumnChunk.setRowCount(new int[] { numberOfRows });
+    return rawColumnChunk;
+  }
+
+  @Override
+  public MeasureColumnDataChunk convertToMeasureChunk(MeasureRawColumnChunk measureRawColumnChunk,
+      int pageNumber) throws IOException {
+    int blockIndex = measureRawColumnChunk.getBlockletId();
+    DataChunk dataChunk = measureColumnChunks.get(blockIndex);
+    ValueEncoderMeta meta = dataChunk.getValueEncoderMeta().get(0);
     ReaderCompressModel compressModel = ValueCompressionUtil.getReaderCompressModel(meta);
 
     ValueCompressionHolder values = compressModel.getValueCompressionHolder();
-    byte[] dataPage = fileReader
-            .readByteArray(filePath, measureColumnChunks.get(blockIndex).getDataPageOffset(),
-                    measureColumnChunks.get(blockIndex).getDataPageLength());
+    byte[] dataPage = new byte[measureRawColumnChunk.getLength()];
+    ByteBuffer rawData = measureRawColumnChunk.getRawData();
+    rawData.position(measureRawColumnChunk.getOffSet());
+    rawData.get(dataPage);
 
     // unCompress data
     values.uncompress(compressModel.getConvertedDataType(), dataPage, 0,
-            measureColumnChunks.get(blockIndex).getDataPageLength(), compressModel.getMantissa(),
-            compressModel.getMaxValue(), numberOfRows);
+        dataChunk.getDataPageLength(), compressModel.getMantissa(),
+        compressModel.getMaxValue(), numberOfRows);
 
     CarbonReadDataHolder measureDataHolder = new CarbonReadDataHolder(values);
 
@@ -99,8 +121,7 @@ public class CompressedMeasureChunkFileBasedReaderV1 extends AbstractMeasureChun
     datChunk.setMeasureDataHolder(measureDataHolder);
     // set the enum value indexes
     datChunk
-        .setNullValueIndexHolder(measureColumnChunks.get(blockIndex).getNullValueIndexForColumn());
+        .setNullValueIndexHolder(dataChunk.getNullValueIndexForColumn());
     return datChunk;
   }
-
 }

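The V1 measure reader now separates IO from decoding: readRawMeasureChunk only fills a direct ByteBuffer, and convertToMeasureChunk decompresses on demand. A hedged sketch of that lazy raw-chunk idea, with illustrative names (not the CarbonData classes):

    import java.nio.ByteBuffer;
    import java.util.function.IntFunction;

    // Hedged sketch: a raw chunk carries undecoded bytes plus a decode callback;
    // the scan decodes a page only when it actually touches it.
    public final class LazyRawChunkSketch {
      static final class RawChunk {
        final ByteBuffer rawData;
        final IntFunction<double[]> pageDecoder;  // pageNumber -> decoded values

        RawChunk(ByteBuffer rawData, IntFunction<double[]> pageDecoder) {
          this.rawData = rawData;
          this.pageDecoder = pageDecoder;
        }

        double[] decodePage(int pageNumber) {     // deferred, on-demand decode
          return pageDecoder.apply(pageNumber);
        }
      }

      public static void main(String[] args) {
        ByteBuffer raw = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });
        RawChunk chunk = new RawChunk(raw, page -> new double[] { raw.get(page) });
        System.out.println(chunk.decodePage(2)[0]); // 3.0
      }
    }
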
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v2/CompressedMeasureChunkFileBasedReaderV2.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v2/CompressedMeasureChunkFileBasedReaderV2.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v2/CompressedMeasureChunkFileBasedReaderV2.java
index d92da61..7ac1578 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v2/CompressedMeasureChunkFileBasedReaderV2.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v2/CompressedMeasureChunkFileBasedReaderV2.java
@@ -17,12 +17,14 @@
 package org.apache.carbondata.core.datastore.chunk.reader.measure.v2;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.BitSet;
 import java.util.List;
 
 import org.apache.carbondata.core.datastore.FileHolder;
 import org.apache.carbondata.core.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.datastore.chunk.impl.MeasureRawColumnChunk;
 import org.apache.carbondata.core.datastore.chunk.reader.measure.AbstractMeasureChunkReader;
 import org.apache.carbondata.core.datastore.compression.CompressorFactory;
 import org.apache.carbondata.core.datastore.compression.ValueCompressionHolder;
@@ -47,7 +49,7 @@ public class CompressedMeasureChunkFileBasedReaderV2 extends AbstractMeasureChun
   /**
    * measure column chunks length
    */
-  private List<Short> measureColumnChunkLength;
+  private List<Integer> measureColumnChunkLength;
 
   /**
    * Constructor to get minimum parameter to create instance of this class
@@ -90,28 +92,28 @@ public class CompressedMeasureChunkFileBasedReaderV2 extends AbstractMeasureChun
    * @return measure column chunks
    * @throws IOException
    */
-  public MeasureColumnDataChunk[] readMeasureChunks(FileHolder fileReader, int[][] blockIndexes)
+  public MeasureRawColumnChunk[] readRawMeasureChunks(FileHolder fileReader, int[][] blockIndexes)
       throws IOException {
     // read the column chunk based on block index and add
-    MeasureColumnDataChunk[] dataChunks =
-        new MeasureColumnDataChunk[measureColumnChunkOffsets.size()];
+    MeasureRawColumnChunk[] dataChunks =
+        new MeasureRawColumnChunk[measureColumnChunkOffsets.size()];
     if (blockIndexes.length == 0) {
       return dataChunks;
     }
-    MeasureColumnDataChunk[] groupChunk = null;
+    MeasureRawColumnChunk[] groupChunk = null;
     int index = 0;
     for (int i = 0; i < blockIndexes.length - 1; i++) {
       index = 0;
-      groupChunk = readMeasureChunksInGroup(fileReader, blockIndexes[i][0], blockIndexes[i][1]);
+      groupChunk = readRawMeasureChunksInGroup(fileReader, blockIndexes[i][0], blockIndexes[i][1]);
       for (int j = blockIndexes[i][0]; j <= blockIndexes[i][1]; j++) {
         dataChunks[j] = groupChunk[index++];
       }
     }
     if (blockIndexes[blockIndexes.length - 1][0] == measureColumnChunkOffsets.size() - 1) {
       dataChunks[blockIndexes[blockIndexes.length - 1][0]] =
-          readMeasureChunk(fileReader, blockIndexes[blockIndexes.length - 1][0]);
+          readRawMeasureChunk(fileReader, blockIndexes[blockIndexes.length - 1][0]);
     } else {
-      groupChunk = readMeasureChunksInGroup(fileReader, blockIndexes[blockIndexes.length - 1][0],
+      groupChunk = readRawMeasureChunksInGroup(fileReader, blockIndexes[blockIndexes.length - 1][0],
           blockIndexes[blockIndexes.length - 1][1]);
       index = 0;
       for (int j = blockIndexes[blockIndexes.length - 1][0];
@@ -122,36 +124,75 @@ public class CompressedMeasureChunkFileBasedReaderV2 extends AbstractMeasureChun
     return dataChunks;
   }
 
-  /**
-   * Method to read the blocks data based on block index
-   *
-   * @param fileReader file reader to read the blocks
-   * @param blockIndex block to be read
-   * @return measure data chunk
-   * @throws IOException
-   */
-  @Override public MeasureColumnDataChunk readMeasureChunk(FileHolder fileReader, int blockIndex)
+  @Override public MeasureRawColumnChunk readRawMeasureChunk(FileHolder fileReader, int blockIndex)
       throws IOException {
+    int dataLength = 0;
+    if (measureColumnChunkOffsets.size() - 1 == blockIndex) {
+      dataLength = measureColumnChunkLength.get(blockIndex);
+    } else {
+      long currentMeasureOffset = measureColumnChunkOffsets.get(blockIndex);
+      dataLength = (int) (measureColumnChunkOffsets.get(blockIndex + 1) - currentMeasureOffset);
+    }
+    ByteBuffer buffer = ByteBuffer.allocateDirect(dataLength);
+    synchronized (fileReader) {
+      fileReader
+          .readByteBuffer(filePath, buffer, measureColumnChunkOffsets.get(blockIndex), dataLength);
+    }
+    MeasureRawColumnChunk rawColumnChunk =
+        new MeasureRawColumnChunk(blockIndex, buffer, 0, dataLength, this);
+    rawColumnChunk.setFileReader(fileReader);
+    rawColumnChunk.setPagesCount(1);
+    rawColumnChunk.setRowCount(new int[] { numberOfRows });
+    return rawColumnChunk;
+  }
+
+  private MeasureRawColumnChunk[] readRawMeasureChunksInGroup(FileHolder fileReader,
+      int startBlockIndex, int endBlockIndex) throws IOException {
+    long currentMeasureOffset = measureColumnChunkOffsets.get(startBlockIndex);
+    ByteBuffer buffer = ByteBuffer.allocateDirect(
+        (int) (measureColumnChunkOffsets.get(endBlockIndex + 1) - currentMeasureOffset));
+    synchronized (fileReader) {
+      fileReader.readByteBuffer(filePath, buffer, currentMeasureOffset,
+          (int) (measureColumnChunkOffsets.get(endBlockIndex + 1) - currentMeasureOffset));
+    }
+    MeasureRawColumnChunk[] dataChunks =
+        new MeasureRawColumnChunk[endBlockIndex - startBlockIndex + 1];
+    int runningLength = 0;
+    int index = 0;
+    for (int i = startBlockIndex; i <= endBlockIndex; i++) {
+      int currentLength =
+          (int) (measureColumnChunkOffsets.get(i + 1) - measureColumnChunkOffsets.get(i));
+      MeasureRawColumnChunk measureRawColumnChunk =
+          new MeasureRawColumnChunk(i, buffer, runningLength, currentLength, this);
+      measureRawColumnChunk.setFileReader(fileReader);
+      measureRawColumnChunk.setRowCount(new int[] { numberOfRows });
+      measureRawColumnChunk.setPagesCount(1);
+      dataChunks[index] = measureRawColumnChunk;
+      runningLength += currentLength;
+      index++;
+    }
+    return dataChunks;
+  }
+
+  public MeasureColumnDataChunk convertToMeasureChunk(MeasureRawColumnChunk measureRawColumnChunk,
+      int pageNumber) throws IOException {
     MeasureColumnDataChunk datChunk = new MeasureColumnDataChunk();
     DataChunk2 measureColumnChunk = null;
-    byte[] measureDataChunk = null;
-    byte[] data = null;
-    int copyPoint = 0;
+    int copyPoint = measureRawColumnChunk.getOffSet();
+    int blockIndex = measureRawColumnChunk.getBlockletId();
+    ByteBuffer rawData = measureRawColumnChunk.getRawData();
     if (measureColumnChunkOffsets.size() - 1 == blockIndex) {
-      measureDataChunk = fileReader
-          .readByteArray(filePath, measureColumnChunkOffsets.get(blockIndex),
-              measureColumnChunkLength.get(blockIndex));
-      measureColumnChunk = CarbonUtil
-          .readDataChunk(measureDataChunk, copyPoint, measureColumnChunkLength.get(blockIndex));
-      data = fileReader.readByteArray(filePath,
-          measureColumnChunkOffsets.get(blockIndex) + measureColumnChunkLength.get(blockIndex),
-          measureColumnChunk.data_page_length);
+      measureColumnChunk =
+          CarbonUtil.readDataChunk(rawData, copyPoint, measureColumnChunkLength.get(blockIndex));
+      synchronized (measureRawColumnChunk.getFileReader()) {
+        rawData = ByteBuffer.allocateDirect(measureColumnChunk.data_page_length);
+        measureRawColumnChunk.getFileReader().readByteBuffer(filePath, rawData,
+            measureColumnChunkOffsets.get(blockIndex) + measureColumnChunkLength.get(blockIndex),
+            measureColumnChunk.data_page_length);
+      }
     } else {
-      long currentMeasureOffset = measureColumnChunkOffsets.get(blockIndex);
-      data = fileReader.readByteArray(filePath, currentMeasureOffset,
-          (int) (measureColumnChunkOffsets.get(blockIndex + 1) - currentMeasureOffset));
       measureColumnChunk =
-          CarbonUtil.readDataChunk(data, copyPoint, measureColumnChunkLength.get(blockIndex));
+          CarbonUtil.readDataChunk(rawData, copyPoint, measureColumnChunkLength.get(blockIndex));
       copyPoint += measureColumnChunkLength.get(blockIndex);
     }
     List<ValueEncoderMeta> valueEncodeMeta = new ArrayList<>();
@@ -162,11 +203,13 @@ public class CompressedMeasureChunkFileBasedReaderV2 extends AbstractMeasureChun
     WriterCompressModel compressionModel = CarbonUtil.getValueCompressionModel(valueEncodeMeta);
 
     ValueCompressionHolder values = compressionModel.getValueCompressionHolder()[0];
-
+    byte[] data = new byte[measureColumnChunk.data_page_length];
+    rawData.position(copyPoint);
+    rawData.get(data);
     // uncompress
-    values.uncompress(compressionModel.getConvertedDataType()[0], data,
-        copyPoint, measureColumnChunk.data_page_length, compressionModel.getMantissa()[0],
-            compressionModel.getMaxValue()[0], numberOfRows);
+    values.uncompress(compressionModel.getConvertedDataType()[0], data, 0,
+        measureColumnChunk.data_page_length, compressionModel.getMantissa()[0],
+        compressionModel.getMaxValue()[0], numberOfRows);
 
     CarbonReadDataHolder measureDataHolder = new CarbonReadDataHolder(values);
 
@@ -177,58 +220,4 @@ public class CompressedMeasureChunkFileBasedReaderV2 extends AbstractMeasureChun
     datChunk.setNullValueIndexHolder(getPresenceMeta(measureColumnChunk.presence));
     return datChunk;
   }
-
-  /**
-   * Below method will be used to read the dimension chunks in group. This is
-   * to enhance the IO performance. Will read the data from start index to end
-   * index(including)
-   *
-   * @param fileReader      stream used for reading
-   * @param startBlockIndex start block index
-   * @param endBlockIndex   end block index
-   * @return measure column chunk array
-   * @throws IOException
-   */
-  private MeasureColumnDataChunk[] readMeasureChunksInGroup(FileHolder fileReader,
-      int startBlockIndex, int endBlockIndex) throws IOException {
-    long currentMeasureOffset = measureColumnChunkOffsets.get(startBlockIndex);
-    byte[] data = fileReader.readByteArray(filePath, currentMeasureOffset,
-        (int) (measureColumnChunkOffsets.get(endBlockIndex + 1) - currentMeasureOffset));
-    MeasureColumnDataChunk[] dataChunks =
-        new MeasureColumnDataChunk[endBlockIndex - startBlockIndex + 1];
-    MeasureColumnDataChunk dataChunk = null;
-    int index = 0;
-    int copyPoint = 0;
-    DataChunk2 measureColumnChunk = null;
-    for (int i = startBlockIndex; i <= endBlockIndex; i++) {
-      dataChunk = new MeasureColumnDataChunk();
-      measureColumnChunk =
-          CarbonUtil.readDataChunk(data, copyPoint, measureColumnChunkLength.get(i));
-      copyPoint += measureColumnChunkLength.get(i);
-      List<ValueEncoderMeta> valueEncodeMeta = new ArrayList<>();
-      for (int j = 0; j < measureColumnChunk.getEncoder_meta().size(); j++) {
-        valueEncodeMeta.add(
-            CarbonUtil.deserializeEncoderMeta(measureColumnChunk.getEncoder_meta().get(j).array()));
-      }
-      WriterCompressModel compressionModel = CarbonUtil.getValueCompressionModel(valueEncodeMeta);
-
-      ValueCompressionHolder values = compressionModel.getValueCompressionHolder()[0];
-
-      // uncompress
-      values.uncompress(compressionModel.getConvertedDataType()[0], data, copyPoint,
-              measureColumnChunk.data_page_length, compressionModel.getMantissa()[0],
-              compressionModel.getMaxValue()[0], numberOfRows);
-
-      CarbonReadDataHolder measureDataHolder = new CarbonReadDataHolder(values);
-
-      copyPoint += measureColumnChunk.data_page_length;
-      // set the data chunk
-      dataChunk.setMeasureDataHolder(measureDataHolder);
-
-      // set the enun value indexes
-      dataChunk.setNullValueIndexHolder(getPresenceMeta(measureColumnChunk.presence));
-      dataChunks[index++] = dataChunk;
-    }
-    return dataChunks;
-  }
 }

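readRawMeasureChunksInGroup above issues one contiguous read covering several adjacent column chunks and gives each chunk only an (offset, length) view into the shared buffer instead of a copy. A hedged sketch of that slicing, with illustrative names:

    import java.nio.ByteBuffer;

    // Hedged sketch: one contiguous IO serves several adjacent chunks; each
    // chunk records its own (offset, length) window into the shared buffer.
    public final class GroupedReadSketch {
      static final class ChunkView {
        final ByteBuffer shared;
        final int offset;
        final int length;

        ChunkView(ByteBuffer shared, int offset, int length) {
          this.shared = shared;
          this.offset = offset;
          this.length = length;
        }
      }

      static ChunkView[] sliceGroup(ByteBuffer shared, long[] chunkOffsets,
          int startIndex, int endIndex) {
        ChunkView[] views = new ChunkView[endIndex - startIndex + 1];
        int runningLength = 0;
        for (int i = startIndex; i <= endIndex; i++) {
          int currentLength = (int) (chunkOffsets[i + 1] - chunkOffsets[i]);
          views[i - startIndex] = new ChunkView(shared, runningLength, currentLength);
          runningLength += currentLength;
        }
        return views;
      }

      public static void main(String[] args) {
        ByteBuffer shared = ByteBuffer.allocate(10);
        long[] offsets = { 100, 104, 110 };  // two chunks: lengths 4 and 6
        ChunkView[] views = sliceGroup(shared, offsets, 0, 1);
        System.out.println(views[0].length + ", " + views[1].length); // 4, 6
      }
    }
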
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/columnar/UnBlockIndexer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/columnar/UnBlockIndexer.java b/core/src/main/java/org/apache/carbondata/core/datastore/columnar/UnBlockIndexer.java
index ef8fff7..a7f38cd 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/columnar/UnBlockIndexer.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/columnar/UnBlockIndexer.java
@@ -27,14 +27,17 @@ public final class UnBlockIndexer {
 
   public static int[] uncompressIndex(int[] indexData, int[] indexMap) {
     int actualSize = indexData.length;
-    for (int i = 0; i < indexMap.length; i++) {
+    int mapLength = indexMap.length;
+    for (int i = 0; i < mapLength; i++) {
       actualSize += indexData[indexMap[i] + 1] - indexData[indexMap[i]] - 1;
     }
     int[] indexes = new int[actualSize];
     int k = 0;
+    int oldIndex = 0;
     for (int i = 0; i < indexData.length; i++) {
-      int index = Arrays.binarySearch(indexMap, i);
+      int index = Arrays.binarySearch(indexMap, oldIndex, mapLength, i);
       if (index > -1) {
+        oldIndex = index;
         for (int j = indexData[indexMap[index]]; j <= indexData[indexMap[index] + 1]; j++) {
           indexes[k] = j;
           k++;

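The binarySearch change above relies on indexMap being sorted and the loop variable i being monotonically increasing, so each search can start at the previous hit instead of index 0. A small self-contained illustration of the narrowed search window:

    import java.util.Arrays;

    // Hedged illustration: because indexMap is sorted and i only grows, each
    // binarySearch can start from the previous hit instead of position 0.
    public final class NarrowedSearchSketch {
      public static void main(String[] args) {
        int[] indexMap = { 1, 4, 7 };
        int oldIndex = 0;
        for (int i = 0; i < 9; i++) {
          int index = Arrays.binarySearch(indexMap, oldIndex, indexMap.length, i);
          if (index > -1) {
            oldIndex = index;  // the next search skips everything before this hit
            System.out.println("i=" + i + " found at indexMap[" + index + "]");
          }
        }
      }
    }
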
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/impl/DFSFileHolderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/impl/DFSFileHolderImpl.java b/core/src/main/java/org/apache/carbondata/core/datastore/impl/DFSFileHolderImpl.java
index 8a97cbf..dcd74c5 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/impl/DFSFileHolderImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/impl/DFSFileHolderImpl.java
@@ -17,6 +17,7 @@
 package org.apache.carbondata.core.datastore.impl;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -129,4 +130,12 @@ public class DFSFileHolderImpl implements FileHolder {
     FSDataInputStream fileChannel = updateCache(filePath);
     return fileChannel.readInt();
   }
+
+  @Override
+  public void readByteBuffer(String filePath, ByteBuffer byteBuffer,
+      long offset, int length) throws IOException {
+    byte[] readByteArray = readByteArray(filePath, offset, length);
+    byteBuffer.put(readByteArray);
+    byteBuffer.rewind();
+  }
 }

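The readByteBuffer contract implied by both implementations is that the buffer comes back filled and rewound, so callers can consume it from position 0 without an explicit flip(). A hedged caller sketch against a stand-in interface (FileHolderLike is illustrative, not the real FileHolder):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.util.Arrays;

    public final class ReadByteBufferCallerSketch {
      // Stand-in for the FileHolder method added in this commit (assumption).
      interface FileHolderLike {
        void readByteBuffer(String filePath, ByteBuffer byteBuffer, long offset,
            int length) throws IOException;
      }

      static byte[] readSection(FileHolderLike reader, String path, long offset,
          int length) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocateDirect(length);
        reader.readByteBuffer(path, buffer, offset, length);
        byte[] out = new byte[length];
        buffer.get(out);  // valid because the contract says the buffer was rewound
        return out;
      }

      public static void main(String[] args) throws IOException {
        FileHolderLike fake = (path, buf, off, len) -> {
          for (int i = 0; i < len; i++) buf.put((byte) (off + i));
          buf.rewind();
        };
        System.out.println(Arrays.toString(readSection(fake, "dummy", 5, 3)));
        // prints [5, 6, 7]
      }
    }
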
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileHolderImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileHolderImpl.java b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileHolderImpl.java
index 1f073a8..d78c28e 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileHolderImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileHolderImpl.java
@@ -192,5 +192,13 @@ public class FileHolderImpl implements FileHolder {
     ByteBuffer byteBffer = read(fileChannel, CarbonCommonConstants.LONG_SIZE_IN_BYTE, offset);
     return byteBffer.getLong();
   }
+  @Override
+  public void readByteBuffer(String filePath, ByteBuffer byteBuffer,
+      long offset, int length) throws IOException {
+    FileChannel fileChannel = updateCache(filePath);
+    fileChannel.position(offset);
+    fileChannel.read(byteBuffer);
+    byteBuffer.rewind();
+  }
 
 }

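One caveat with the FileHolderImpl variant: a single FileChannel.read call may return before the buffer is full. A defensive sketch that loops until the requested length is read (an assumption-laden sketch, not the committed code):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    // Hedged sketch: loop until 'length' bytes are read, since a single
    // FileChannel.read call is allowed to return early.
    public final class ReadFullySketch {
      static void readFully(FileChannel channel, ByteBuffer buffer, long offset,
          int length) throws IOException {
        channel.position(offset);
        int totalRead = 0;
        while (totalRead < length) {
          int read = channel.read(buffer);
          if (read < 0) {
            throw new IOException("Unexpected end of file");
          }
          totalRead += read;
        }
        buffer.rewind();
      }
    }
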
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/impl/btree/AbstractBTreeLeafNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/impl/btree/AbstractBTreeLeafNode.java b/core/src/main/java/org/apache/carbondata/core/datastore/impl/btree/AbstractBTreeLeafNode.java
index 2bb4d83..dfd35bc 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/impl/btree/AbstractBTreeLeafNode.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/impl/btree/AbstractBTreeLeafNode.java
@@ -22,8 +22,8 @@ import org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache
 import org.apache.carbondata.core.datastore.DataRefNode;
 import org.apache.carbondata.core.datastore.FileHolder;
 import org.apache.carbondata.core.datastore.IndexKey;
-import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
-import org.apache.carbondata.core.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
+import org.apache.carbondata.core.datastore.chunk.impl.MeasureRawColumnChunk;
 
 /**
  * Leaf node abstract class
@@ -177,7 +177,7 @@ public abstract class AbstractBTreeLeafNode implements BTreeNode {
    * @param blockIndexes indexes of the blocks need to be read
    * @return dimension data chunks
    */
-  @Override public DimensionColumnDataChunk[] getDimensionChunks(FileHolder fileReader,
+  @Override public DimensionRawColumnChunk[] getDimensionChunks(FileHolder fileReader,
       int[][] blockIndexes) throws IOException {
     // Not required here as the leaf node using this class will implement its own get
     // dimension chunks
@@ -191,7 +191,7 @@ public abstract class AbstractBTreeLeafNode implements BTreeNode {
    * @param blockIndex block index to be read
    * @return dimension data chunk
    */
-  @Override public DimensionColumnDataChunk getDimensionChunk(FileHolder fileReader,
+  @Override public DimensionRawColumnChunk getDimensionChunk(FileHolder fileReader,
       int blockIndex) throws IOException {
     // Not required here as the leaf node using this class will implement
     // its own get dimension chunks
@@ -205,7 +205,7 @@ public abstract class AbstractBTreeLeafNode implements BTreeNode {
    * @param blockIndexes block indexes to be read from file
    * @return measure column data chunk
    */
-  @Override public MeasureColumnDataChunk[] getMeasureChunks(FileHolder fileReader,
+  @Override public MeasureRawColumnChunk[] getMeasureChunks(FileHolder fileReader,
       int[][] blockIndexes) throws IOException {
     // Not required here as the leaf node using this class will implement its own get
     // measure chunks
@@ -219,7 +219,7 @@ public abstract class AbstractBTreeLeafNode implements BTreeNode {
    * @param blockIndex block index to be read from file
    * @return measure data chunk
    */
-  @Override public MeasureColumnDataChunk getMeasureChunk(FileHolder fileReader, int blockIndex)
+  @Override public MeasureRawColumnChunk getMeasureChunk(FileHolder fileReader, int blockIndex)
       throws IOException {
     // Not required here as the leaf node using this class will implement its own get
     // measure chunks

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/72cb415a/core/src/main/java/org/apache/carbondata/core/datastore/impl/btree/BTreeNonLeafNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/impl/btree/BTreeNonLeafNode.java b/core/src/main/java/org/apache/carbondata/core/datastore/impl/btree/BTreeNonLeafNode.java
index 404aad7..8e5976d 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/impl/btree/BTreeNonLeafNode.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/impl/btree/BTreeNonLeafNode.java
@@ -24,8 +24,8 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.DataRefNode;
 import org.apache.carbondata.core.datastore.FileHolder;
 import org.apache.carbondata.core.datastore.IndexKey;
-import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
-import org.apache.carbondata.core.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
+import org.apache.carbondata.core.datastore.chunk.impl.MeasureRawColumnChunk;
 
 /**
  * Non leaf node of a B+Tree class which will keep the metadata (start key) of the
@@ -170,7 +170,7 @@ public class BTreeNonLeafNode implements BTreeNode {
    * @param blockIndexes indexes of the blocks need to be read
    * @return dimension data chunks
    */
-  @Override public DimensionColumnDataChunk[] getDimensionChunks(FileHolder fileReader,
+  @Override public DimensionRawColumnChunk[] getDimensionChunks(FileHolder fileReader,
       int[][] blockIndexes) {
 
     // operation of getting the dimension chunks is not supported as its a
@@ -187,7 +187,7 @@ public class BTreeNonLeafNode implements BTreeNode {
    * @param fileReader file reader to read the chunk from file
    * @return dimension data chunk
    */
-  @Override public DimensionColumnDataChunk getDimensionChunk(FileHolder fileReader,
+  @Override public DimensionRawColumnChunk getDimensionChunk(FileHolder fileReader,
       int blockIndexes) {
     // operation of getting the dimension chunk is not supported as its a
     // non leaf node
@@ -204,7 +204,7 @@ public class BTreeNonLeafNode implements BTreeNode {
    * @param blockIndexes block indexes to be read from file
    * @return measure column data chunk
    */
-  @Override public MeasureColumnDataChunk[] getMeasureChunks(FileHolder fileReader,
+  @Override public MeasureRawColumnChunk[] getMeasureChunks(FileHolder fileReader,
       int[][] blockIndexes) {
     // operation of getting the measure chunk is not supported as its a non
     // leaf node
@@ -222,7 +222,7 @@ public class BTreeNonLeafNode implements BTreeNode {
    * @return measure data chunk
    */
 
-  @Override public MeasureColumnDataChunk getMeasureChunk(FileHolder fileReader, int blockIndex) {
+  @Override public MeasureRawColumnChunk getMeasureChunk(FileHolder fileReader, int blockIndex) {
     // operation of getting the measure chunk is not supported as its a non
     // leaf node
     // and in case of B+Tree data will be stored only in leaf node and