Posted to commits@carbondata.apache.org by ch...@apache.org on 2016/08/15 07:09:10 UTC

[25/52] [partial] incubator-carbondata git commit: Renamed packages to org.apache.carbondata and fixed errors

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/result/iterator/AbstractDetailQueryResultIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/result/iterator/AbstractDetailQueryResultIterator.java b/core/src/main/java/org/apache/carbondata/scan/result/iterator/AbstractDetailQueryResultIterator.java
new file mode 100644
index 0000000..2abe39a
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/result/iterator/AbstractDetailQueryResultIterator.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.result.iterator;
+
+import java.util.List;
+
+import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.datastore.DataRefNode;
+import org.apache.carbondata.core.carbon.datastore.DataRefNodeFinder;
+import org.apache.carbondata.core.carbon.datastore.impl.btree.BTreeDataRefNodeFinder;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.model.QueryModel;
+import org.apache.carbondata.scan.processor.AbstractDataBlockIterator;
+import org.apache.carbondata.scan.processor.impl.DataBlockIteratorImpl;
+
+/**
+ * For a detail query we cannot keep all the records in memory, so this class
+ * returns an iterator over the blocks instead: each call to next() executes
+ * one block and returns its result
+ */
+public abstract class AbstractDetailQueryResultIterator extends CarbonIterator {
+
+  /**
+   * LOGGER.
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(AbstractDetailQueryResultIterator.class.getName());
+
+  /**
+   * execution info of the block
+   */
+  protected List<BlockExecutionInfo> blockExecutionInfos;
+
+  /**
+   * number of records to be fetched in one batch
+   */
+  private int batchSize;
+
+  /**
+   * file reader which will be used to execute the query
+   */
+  protected FileHolder fileReader;
+
+  protected AbstractDataBlockIterator dataBlockIterator;
+
+  protected boolean nextBatch = false;
+
+  public AbstractDetailQueryResultIterator(List<BlockExecutionInfo> infos, QueryModel queryModel) {
+    String batchSizeString =
+        CarbonProperties.getInstance().getProperty(CarbonCommonConstants.DETAIL_QUERY_BATCH_SIZE);
+    if (null != batchSizeString) {
+      try {
+        batchSize = Integer.parseInt(batchSizeString);
+      } catch (NumberFormatException ne) {
+        LOGGER.error("Invalid inmemory records size. Using default value");
+        batchSize = CarbonCommonConstants.DETAIL_QUERY_BATCH_SIZE_DEFAULT;
+      }
+    } else {
+      batchSize = CarbonCommonConstants.DETAIL_QUERY_BATCH_SIZE_DEFAULT;
+    }
+
+    this.blockExecutionInfos = infos;
+    this.fileReader = FileFactory.getFileHolder(
+        FileFactory.getFileType(queryModel.getAbsoluteTableIdentifier().getStorePath()));
+    initialiseInfos();
+  }
+
+  private void initialiseInfos() {
+    for (BlockExecutionInfo blockInfo : blockExecutionInfos) {
+      DataRefNodeFinder finder = new BTreeDataRefNodeFinder(blockInfo.getEachColumnValueSize());
+      DataRefNode startDataBlock = finder
+          .findFirstDataBlock(blockInfo.getDataBlock().getDataRefNode(), blockInfo.getStartKey());
+      DataRefNode endDataBlock = finder
+          .findLastDataBlock(blockInfo.getDataBlock().getDataRefNode(), blockInfo.getEndKey());
+      long numberOfBlockToScan = endDataBlock.nodeNumber() - startDataBlock.nodeNumber() + 1;
+      blockInfo.setFirstDataBlock(startDataBlock);
+      blockInfo.setNumberOfBlockToScan(numberOfBlockToScan);
+    }
+  }
+
+  @Override public boolean hasNext() {
+    return (dataBlockIterator != null && dataBlockIterator.hasNext())
+        || nextBatch || blockExecutionInfos.size() > 0;
+  }
+
+  protected void updateDataBlockIterator() {
+    if (dataBlockIterator == null || !dataBlockIterator.hasNext()) {
+      dataBlockIterator = getDataBlockIterator();
+      while (dataBlockIterator != null && !dataBlockIterator.hasNext()) {
+        dataBlockIterator = getDataBlockIterator();
+      }
+    }
+  }
+
+  private DataBlockIteratorImpl getDataBlockIterator() {
+    if (blockExecutionInfos.size() > 0) {
+      BlockExecutionInfo executionInfo = blockExecutionInfos.remove(0);
+      return new DataBlockIteratorImpl(executionInfo, fileReader, batchSize);
+    }
+    return null;
+  }
+
+}
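
The constructor above resolves the batch size with a read-parse-fallback pattern: read a string property, parse it, and fall back to a default on missing or malformed input. A minimal standalone sketch of the same pattern (the property key and default value here are illustrative, not CarbonData's actual constants):

    import java.util.Properties;

    public class BatchSizeConfig {
      private static final int DEFAULT_BATCH_SIZE = 100; // assumed default, for illustration

      // Parse a batch size from configuration, falling back to the default on
      // missing or malformed values, mirroring the constructor logic above.
      static int resolveBatchSize(Properties props, String key) {
        String value = props.getProperty(key);
        if (value == null) {
          return DEFAULT_BATCH_SIZE;
        }
        try {
          return Integer.parseInt(value);
        } catch (NumberFormatException e) {
          return DEFAULT_BATCH_SIZE; // invalid value, use the default
        }
      }

      public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("carbon.detail.batch.size", "250");
        System.out.println(resolveBatchSize(props, "carbon.detail.batch.size")); // 250
        System.out.println(resolveBatchSize(props, "missing.key"));              // 100
      }
    }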

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/result/iterator/ChunkRowIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/result/iterator/ChunkRowIterator.java b/core/src/main/java/org/apache/carbondata/scan/result/iterator/ChunkRowIterator.java
new file mode 100644
index 0000000..680b374
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/result/iterator/ChunkRowIterator.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.scan.result.iterator;
+
+import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.scan.result.BatchResult;
+
+/**
+ * Iterator over row result
+ */
+public class ChunkRowIterator extends CarbonIterator<Object[]> {
+
+  /**
+   * iterator over chunk result
+   */
+  private CarbonIterator<BatchResult> iterator;
+
+  /**
+   * current chunk
+   */
+  private BatchResult currentChunk;
+
+  public ChunkRowIterator(CarbonIterator<BatchResult> iterator) {
+    this.iterator = iterator;
+    if (iterator.hasNext()) {
+      currentChunk = iterator.next();
+    }
+  }
+
+  /**
+   * Returns {@code true} if the iteration has more elements. (In other words,
+   * returns {@code true} if {@link #next} would return an element rather than
+   * throwing an exception.)
+   *
+   * @return {@code true} if the iteration has more elements
+   */
+  @Override public boolean hasNext() {
+    if (null == currentChunk) {
+      return false;
+    }
+    if (currentChunk.hasNext()) {
+      return true;
+    }
+    while (iterator.hasNext()) {
+      currentChunk = iterator.next();
+      if (currentChunk != null && currentChunk.hasNext()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Returns the next element in the iteration.
+   *
+   * @return the next element in the iteration
+   */
+  @Override public Object[] next() {
+    return currentChunk.next();
+  }
+
+}
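
ChunkRowIterator flattens an iterator of BatchResult chunks into a row-at-a-time iterator, skipping exhausted or empty chunks. The same idea written generically against java.util.Iterator (a sketch, not part of the CarbonData API):

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    // Flatten an iterator of batches into an iterator of elements, always
    // keeping the current batch positioned on its next element.
    public class FlatteningIterator<T> implements Iterator<T> {
      private final Iterator<Iterator<T>> batches;
      private Iterator<T> current;

      public FlatteningIterator(Iterator<Iterator<T>> batches) {
        this.batches = batches;
        if (batches.hasNext()) {
          current = batches.next();
        }
      }

      @Override public boolean hasNext() {
        if (current == null) {
          return false;
        }
        while (!current.hasNext() && batches.hasNext()) {
          current = batches.next(); // skip exhausted or empty batches
        }
        return current.hasNext();
      }

      @Override public T next() {
        return current.next();
      }

      public static void main(String[] args) {
        List<Iterator<String>> chunks = Arrays.asList(
            Arrays.asList("a", "b").iterator(),
            Arrays.<String>asList().iterator(),
            Arrays.asList("c").iterator());
        FlatteningIterator<String> it = new FlatteningIterator<>(chunks.iterator());
        while (it.hasNext()) {
          System.out.println(it.next()); // prints a, b, c
        }
      }
    }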

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/result/iterator/DetailQueryResultIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/result/iterator/DetailQueryResultIterator.java b/core/src/main/java/org/apache/carbondata/scan/result/iterator/DetailQueryResultIterator.java
new file mode 100644
index 0000000..fa804a5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/result/iterator/DetailQueryResultIterator.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.result.iterator;
+
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.model.QueryModel;
+import org.apache.carbondata.scan.result.BatchResult;
+
+/**
+ * For a detail query we cannot keep all the records in memory, so this class
+ * returns an iterator over the blocks instead: each call to next() executes
+ * one block and returns its result
+ */
+public class DetailQueryResultIterator extends AbstractDetailQueryResultIterator {
+
+  private ExecutorService execService = Executors.newFixedThreadPool(1);
+
+  private Future<BatchResult> future;
+
+  private final Object lock = new Object();
+
+  public DetailQueryResultIterator(List<BlockExecutionInfo> infos, QueryModel queryModel) {
+    super(infos, queryModel);
+  }
+
+  @Override public BatchResult next() {
+    BatchResult result;
+    try {
+      if (future == null) {
+        future = execute();
+      }
+      result = future.get();
+      nextBatch = false;
+      if (hasNext()) {
+        nextBatch = true;
+        future = execute();
+      } else {
+        execService.shutdown();
+        execService.awaitTermination(1, TimeUnit.HOURS);
+        fileReader.finish();
+      }
+    } catch (Exception ex) {
+      execService.shutdown();
+      fileReader.finish();
+      throw new RuntimeException(ex);
+    }
+    return result;
+  }
+
+  private Future<BatchResult> execute() {
+    return execService.submit(new Callable<BatchResult>() {
+      @Override public BatchResult call() throws QueryExecutionException {
+        BatchResult batchResult = new BatchResult();
+        synchronized (lock) {
+          updateDataBlockIterator();
+          if (dataBlockIterator != null) {
+            batchResult.setRows(dataBlockIterator.next());
+          }
+        }
+        return batchResult;
+      }
+    });
+  }
+}
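
DetailQueryResultIterator overlaps computation with consumption: while the caller processes batch N, its single worker thread is already executing batch N+1, and next() merely waits on the Future. A self-contained sketch of that prefetching pattern (the names and structure are illustrative, not CarbonData code), assuming a single consuming thread:

    import java.util.Iterator;
    import java.util.NoSuchElementException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class PrefetchingIterator<T> implements Iterator<T> {
      private final Iterator<T> source;
      private final ExecutorService executor = Executors.newFixedThreadPool(1);
      private Future<T> pending;

      public PrefetchingIterator(Iterator<T> source) {
        this.source = source;
        schedule(); // start computing the first batch immediately
      }

      // Submit the next batch to the worker thread, or shut down when exhausted.
      private void schedule() {
        if (source.hasNext()) {
          pending = executor.submit(source::next);
        } else {
          pending = null;
          executor.shutdown();
        }
      }

      @Override public boolean hasNext() {
        return pending != null;
      }

      @Override public T next() {
        if (pending == null) {
          throw new NoSuchElementException();
        }
        try {
          T result = pending.get(); // wait for the batch in flight
          schedule();               // prefetch the next one while the caller works
          return result;
        } catch (Exception e) {
          executor.shutdown();
          throw new RuntimeException(e);
        }
      }
    }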

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/result/iterator/RawResultIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/result/iterator/RawResultIterator.java b/core/src/main/java/org/apache/carbondata/scan/result/iterator/RawResultIterator.java
new file mode 100644
index 0000000..4b20627
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/result/iterator/RawResultIterator.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.result.iterator;
+
+import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+import org.apache.carbondata.scan.result.BatchResult;
+import org.apache.carbondata.scan.wrappers.ByteArrayWrapper;
+
+/**
+ * A wrapper over the detail raw query result iterator: it processes the raw
+ * rows, iterating over the underlying batches and returning one row at a time.
+ */
+public class RawResultIterator extends CarbonIterator<Object[]> {
+
+  private final SegmentProperties sourceSegProperties;
+
+  private final SegmentProperties destinationSegProperties;
+  /**
+   * Iterator of the Batch raw result.
+   */
+  private CarbonIterator<BatchResult> detailRawQueryResultIterator;
+
+  /**
+   * Counter to maintain the row counter.
+   */
+  private int counter = 0;
+
+  private Object[] currentConvertedRawRow = null;
+
+  /**
+   * LOGGER
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(RawResultIterator.class.getName());
+
+  /**
+   * batch of the result.
+   */
+  private BatchResult batch;
+
+  public RawResultIterator(CarbonIterator<BatchResult> detailRawQueryResultIterator,
+      SegmentProperties sourceSegProperties, SegmentProperties destinationSegProperties) {
+    this.detailRawQueryResultIterator = detailRawQueryResultIterator;
+    this.sourceSegProperties = sourceSegProperties;
+    this.destinationSegProperties = destinationSegProperties;
+  }
+
+  @Override public boolean hasNext() {
+    if (null == batch || checkIfBatchIsProcessedCompletely(batch)) {
+      if (detailRawQueryResultIterator.hasNext()) {
+        batch = detailRawQueryResultIterator.next();
+        counter = 0; // batch changed so reset the counter.
+      } else {
+        return false;
+      }
+    }
+    return !checkIfBatchIsProcessedCompletely(batch);
+  }
+
+  @Override public Object[] next() {
+    if (null == batch) { // for the first call
+      batch = detailRawQueryResultIterator.next();
+    }
+    if (checkIfBatchIsProcessedCompletely(batch)) { // completed one batch
+      batch = detailRawQueryResultIterator.next();
+      counter = 0;
+    }
+    try {
+      if (null != currentConvertedRawRow) {
+        counter++;
+        Object[] convertedRow = this.currentConvertedRawRow;
+        currentConvertedRawRow = null;
+        return convertedRow;
+      }
+      return convertRow(batch.getRawRow(counter++));
+    } catch (KeyGenException e) {
+      LOGGER.error(e.getMessage());
+      return null;
+    }
+  }
+
+  /**
+   * Fetches the converted row without incrementing the counter.
+   *
+   * @return the converted row, or null if there are no more rows
+   */
+  public Object[] fetchConverted() throws KeyGenException {
+    if (null != currentConvertedRawRow) {
+      return currentConvertedRawRow;
+    }
+    if (hasNext()) {
+      Object[] rawRow = batch.getRawRow(counter);
+      currentConvertedRawRow = convertRow(rawRow);
+      return currentConvertedRawRow;
+    }
+    return null;
+  }
+
+  private Object[] convertRow(Object[] rawRow) throws KeyGenException {
+    byte[] dims = ((ByteArrayWrapper) rawRow[0]).getDictionaryKey();
+    long[] keyArray = sourceSegProperties.getDimensionKeyGenerator().getKeyArray(dims);
+    byte[] convertedBytes =
+        destinationSegProperties.getDimensionKeyGenerator().generateKey(keyArray);
+    ((ByteArrayWrapper) rawRow[0]).setDictionaryKey(convertedBytes);
+    return rawRow;
+  }
+
+  /**
+   * Checks whether the batch has been processed completely.
+   *
+   * @param batch
+   * @return true if all rows of the batch have been consumed
+   */
+  private boolean checkIfBatchIsProcessedCompletely(BatchResult batch) {
+    return counter >= batch.getSize();
+  }
+}
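
fetchConverted() lets a caller look at the next converted row without consuming it, caching the conversion so next() does not repeat the (potentially expensive) work. The generic shape of that pattern (a sketch assuming non-null elements, not CarbonData API):

    import java.util.Iterator;
    import java.util.NoSuchElementException;

    // peek() computes and caches the next element; next() returns the cached
    // value when present, so the expensive step runs only once per element.
    public class PeekingIterator<T> implements Iterator<T> {
      private final Iterator<T> source;
      private T cached; // holds a peeked element that next() has not yet returned

      public PeekingIterator(Iterator<T> source) {
        this.source = source;
      }

      public T peek() {
        if (cached == null && source.hasNext()) {
          cached = source.next();
        }
        return cached; // null when the source is exhausted
      }

      @Override public boolean hasNext() {
        return cached != null || source.hasNext();
      }

      @Override public T next() {
        if (cached != null) {
          T result = cached;
          cached = null;
          return result;
        }
        if (!source.hasNext()) {
          throw new NoSuchElementException();
        }
        return source.next();
      }
    }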

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/scanner/AbstractBlockletScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/scanner/AbstractBlockletScanner.java b/core/src/main/java/org/apache/carbondata/scan/scanner/AbstractBlockletScanner.java
new file mode 100644
index 0000000..caee061
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/scanner/AbstractBlockletScanner.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.scanner;
+
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+import org.apache.carbondata.scan.result.AbstractScannedResult;
+
+/**
+ * Blocklet scanner class to process the block
+ */
+public abstract class AbstractBlockletScanner implements BlockletScanner {
+
+  /**
+   * scanner result
+   */
+  protected AbstractScannedResult scannedResult;
+
+  /**
+   * block execution info
+   */
+  protected BlockExecutionInfo blockExecutionInfo;
+
+  public AbstractBlockletScanner(BlockExecutionInfo tableBlockExecutionInfos) {
+    this.blockExecutionInfo = tableBlockExecutionInfos;
+  }
+
+  @Override public AbstractScannedResult scanBlocklet(BlocksChunkHolder blocksChunkHolder)
+      throws QueryExecutionException {
+    fillKeyValue(blocksChunkHolder);
+    return scannedResult;
+  }
+
+  protected void fillKeyValue(BlocksChunkHolder blocksChunkHolder) {
+    scannedResult.reset();
+    scannedResult.setMeasureChunks(blocksChunkHolder.getDataBlock()
+        .getMeasureChunks(blocksChunkHolder.getFileReader(),
+            blockExecutionInfo.getAllSelectedMeasureBlocksIndexes()));
+    scannedResult.setNumberOfRows(blocksChunkHolder.getDataBlock().nodeSize());
+
+    scannedResult.setDimensionChunks(blocksChunkHolder.getDataBlock()
+        .getDimensionChunks(blocksChunkHolder.getFileReader(),
+            blockExecutionInfo.getAllSelectedDimensionBlocksIndexes()));
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/scanner/BlockletScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/scanner/BlockletScanner.java b/core/src/main/java/org/apache/carbondata/scan/scanner/BlockletScanner.java
new file mode 100644
index 0000000..0337197
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/scanner/BlockletScanner.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.scanner;
+
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+import org.apache.carbondata.scan.result.AbstractScannedResult;
+
+/**
+ * Interface for processing the block
+ * Processing can be filter based processing or non filter based processing
+ */
+public interface BlockletScanner {
+
+  /**
+   * Below method will be used to process the block data and get the scanned result
+   *
+   * @param blocksChunkHolder block chunk which holds the block data
+   * @return scannerResult
+   * result after processing
+   * @throws QueryExecutionException
+   */
+  AbstractScannedResult scanBlocklet(BlocksChunkHolder blocksChunkHolder)
+      throws QueryExecutionException;
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/scanner/impl/FilterScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/scanner/impl/FilterScanner.java b/core/src/main/java/org/apache/carbondata/scan/scanner/impl/FilterScanner.java
new file mode 100644
index 0000000..1f63a88
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/scanner/impl/FilterScanner.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.scanner.impl;
+
+import java.util.BitSet;
+
+import org.apache.carbondata.core.carbon.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastorage.store.FileHolder;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.scan.processor.BlocksChunkHolder;
+import org.apache.carbondata.scan.result.AbstractScannedResult;
+import org.apache.carbondata.scan.result.impl.FilterQueryScannedResult;
+import org.apache.carbondata.scan.scanner.AbstractBlockletScanner;
+
+/**
+ * This class is used for filter query processing: it first applies the
+ * filter, then reads the blocks only if required, and returns the scanned
+ * result
+ */
+public class FilterScanner extends AbstractBlockletScanner {
+
+  /**
+   * filter tree
+   */
+  private FilterExecuter filterExecuter;
+
+  /**
+   * whether to apply min/max pruning. This is useful for dimension columns:
+   * the node finder always gives tentative blocks, so if column data is
+   * stored individually in sorted order we can first check whether the
+   * filter value lies within the min/max range, and apply the filter on the
+   * complete data only when it does. This is very useful for sparse data
+   * with repeating rows.
+   */
+  private boolean isMinMaxEnabled;
+
+  public FilterScanner(BlockExecutionInfo blockExecutionInfo) {
+    super(blockExecutionInfo);
+    scannedResult = new FilterQueryScannedResult(blockExecutionInfo);
+    // to check whether min max is enabled or not
+    String minMaxEnableValue = CarbonProperties.getInstance()
+        .getProperty(CarbonCommonConstants.CARBON_QUERY_MIN_MAX_ENABLED,
+            CarbonCommonConstants.MIN_MAX_DEFAULT_VALUE);
+    if (null != minMaxEnableValue) {
+      isMinMaxEnabled = Boolean.parseBoolean(minMaxEnableValue);
+    }
+    // get the filter tree
+    this.filterExecuter = blockExecutionInfo.getFilterExecuterTree();
+  }
+
+  /**
+   * Below method will be used to process the block
+   *
+   * @param blocksChunkHolder block chunk holder which holds the data
+   * @throws QueryExecutionException
+   * @throws FilterUnsupportedException
+   */
+  @Override public AbstractScannedResult scanBlocklet(BlocksChunkHolder blocksChunkHolder)
+      throws QueryExecutionException {
+    try {
+      fillScannedResult(blocksChunkHolder);
+    } catch (FilterUnsupportedException e) {
+      throw new QueryExecutionException(e.getMessage());
+    }
+    return scannedResult;
+  }
+
+  /**
+   * This method processes the data in the following order:
+   * 1. First apply min/max pruning on the filter tree and check whether any
+   * filter value falls within the min/max range; if not, return an empty result.
+   * 2. If the filter falls within the min/max range, apply the filter on the
+   * actual data and get the matching row indexes.
+   * 3. If the row indexes are empty, return an empty result.
+   * 4. Otherwise read only those blocks (measure or dimension) which are
+   * present in the query but not yet in the chunk holder; blocks already read
+   * while applying the filter are reused, to avoid reading the same blocks twice.
+   * 5. Set the blocks and filter indexes on the result.
+   *
+   * @param blocksChunkHolder
+   * @throws FilterUnsupportedException
+   */
+  private void fillScannedResult(BlocksChunkHolder blocksChunkHolder)
+      throws FilterUnsupportedException {
+
+    scannedResult.reset();
+    // apply min max
+    if (isMinMaxEnabled) {
+      BitSet bitSet = this.filterExecuter
+          .isScanRequired(blocksChunkHolder.getDataBlock().getColumnsMaxValue(),
+              blocksChunkHolder.getDataBlock().getColumnsMinValue());
+      if (bitSet.isEmpty()) {
+        scannedResult.setNumberOfRows(0);
+        scannedResult.setIndexes(new int[0]);
+        return;
+      }
+    }
+    // apply filter on actual data
+    BitSet bitSet = this.filterExecuter.applyFilter(blocksChunkHolder);
+    // if the bit set is empty then return an empty result
+    if (bitSet.isEmpty()) {
+      scannedResult.setNumberOfRows(0);
+      scannedResult.setIndexes(new int[0]);
+      return;
+    }
+    // get the row indexes from the bit set
+    int[] indexes = new int[bitSet.cardinality()];
+    int index = 0;
+    for (int i = bitSet.nextSetBit(0); i >= 0; i = bitSet.nextSetBit(i + 1)) {
+      indexes[index++] = i;
+    }
+
+    FileHolder fileReader = blocksChunkHolder.getFileReader();
+    int[] allSelectedDimensionBlocksIndexes =
+        blockExecutionInfo.getAllSelectedDimensionBlocksIndexes();
+    DimensionColumnDataChunk[] dimensionColumnDataChunk =
+        new DimensionColumnDataChunk[blockExecutionInfo.getTotalNumberDimensionBlock()];
+    // read the dimension chunks from file which are not already present
+    for (int i = 0; i < allSelectedDimensionBlocksIndexes.length; i++) {
+      if (null == blocksChunkHolder.getDimensionDataChunk()[allSelectedDimensionBlocksIndexes[i]]) {
+        dimensionColumnDataChunk[allSelectedDimensionBlocksIndexes[i]] =
+            blocksChunkHolder.getDataBlock()
+                .getDimensionChunk(fileReader, allSelectedDimensionBlocksIndexes[i]);
+      } else {
+        dimensionColumnDataChunk[allSelectedDimensionBlocksIndexes[i]] =
+            blocksChunkHolder.getDimensionDataChunk()[allSelectedDimensionBlocksIndexes[i]];
+      }
+    }
+    MeasureColumnDataChunk[] measureColumnDataChunk =
+        new MeasureColumnDataChunk[blockExecutionInfo.getTotalNumberOfMeasureBlock()];
+    int[] allSelectedMeasureBlocksIndexes = blockExecutionInfo.getAllSelectedMeasureBlocksIndexes();
+
+    // read the measure chunks which are not already present
+    for (int i = 0; i < allSelectedMeasureBlocksIndexes.length; i++) {
+
+      if (null == blocksChunkHolder.getMeasureDataChunk()[allSelectedMeasureBlocksIndexes[i]]) {
+        measureColumnDataChunk[allSelectedMeasureBlocksIndexes[i]] =
+            blocksChunkHolder.getDataBlock()
+                .getMeasureChunk(fileReader, allSelectedMeasureBlocksIndexes[i]);
+      } else {
+        measureColumnDataChunk[allSelectedMeasureBlocksIndexes[i]] =
+            blocksChunkHolder.getMeasureDataChunk()[allSelectedMeasureBlocksIndexes[i]];
+      }
+    }
+    scannedResult.setDimensionChunks(dimensionColumnDataChunk);
+    scannedResult.setIndexes(indexes);
+    scannedResult.setMeasureChunks(measureColumnDataChunk);
+    scannedResult.setNumberOfRows(indexes.length);
+  }
+}
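
The loop above that turns the filter BitSet into a row-index array is a standard idiom; as a standalone, runnable example of exactly that conversion:

    import java.util.Arrays;
    import java.util.BitSet;

    public class BitSetIndexes {
      // Collect the positions of all set bits, as fillScannedResult does.
      static int[] toIndexArray(BitSet bitSet) {
        int[] indexes = new int[bitSet.cardinality()];
        int index = 0;
        for (int i = bitSet.nextSetBit(0); i >= 0; i = bitSet.nextSetBit(i + 1)) {
          indexes[index++] = i;
        }
        return indexes;
      }

      public static void main(String[] args) {
        BitSet matches = new BitSet();
        matches.set(2);
        matches.set(5);
        matches.set(9);
        System.out.println(Arrays.toString(toIndexArray(matches))); // [2, 5, 9]
      }
    }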

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/scanner/impl/NonFilterScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/scanner/impl/NonFilterScanner.java b/core/src/main/java/org/apache/carbondata/scan/scanner/impl/NonFilterScanner.java
new file mode 100644
index 0000000..4d8a8e0
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/scanner/impl/NonFilterScanner.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.scanner.impl;
+
+import org.apache.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.scan.result.impl.NonFilterQueryScannedResult;
+import org.apache.carbondata.scan.scanner.AbstractBlockletScanner;
+
+/**
+ * Non-filter scanner, used for non-filter queries.
+ * In this case we just need to read all the blocks requested by the query
+ * and pass them to the scanned result
+ */
+public class NonFilterScanner extends AbstractBlockletScanner {
+
+  public NonFilterScanner(BlockExecutionInfo blockExecutionInfo) {
+    super(blockExecutionInfo);
+    // as it is a non-filter query, create a non-filter query scanned result object
+    scannedResult = new NonFilterQueryScannedResult(blockExecutionInfo);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/apache/carbondata/scan/wrappers/ByteArrayWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/scan/wrappers/ByteArrayWrapper.java b/core/src/main/java/org/apache/carbondata/scan/wrappers/ByteArrayWrapper.java
new file mode 100644
index 0000000..1b3a4c6
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/scan/wrappers/ByteArrayWrapper.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.carbondata.scan.wrappers;
+
+import org.apache.carbondata.core.util.ByteUtil.UnsafeComparer;
+
+/**
+ * This class will store the dimension column data when query is executed
+ * This can be used as a key for aggregation
+ */
+public class ByteArrayWrapper implements Comparable<ByteArrayWrapper> {
+
+  /**
+   * to store key which is generated using
+   * key generator
+   */
+  protected byte[] dictionaryKey;
+
+  /**
+   * to store complex type column data
+   */
+  protected byte[][] complexTypesKeys;
+
+  /**
+   * to store no dictionary column data
+   */
+  protected byte[][] noDictionaryKeys;
+
+  public ByteArrayWrapper() {
+  }
+
+  /**
+   * @return the dictionaryKey
+   */
+  public byte[] getDictionaryKey() {
+    return dictionaryKey;
+  }
+
+  /**
+   * @param dictionaryKey the dictionaryKey to set
+   */
+  public void setDictionaryKey(byte[] dictionaryKey) {
+    this.dictionaryKey = dictionaryKey;
+  }
+
+  /**
+   * @param noDictionaryKeys the noDictionaryKeys to set
+   */
+  public void setNoDictionaryKeys(byte[][] noDictionaryKeys) {
+    this.noDictionaryKeys = noDictionaryKeys;
+  }
+
+  /**
+   * to get the no dictionary column data
+   *
+   * @param index of the no dictionary key
+   * @return no dictionary key for the index
+   */
+  public byte[] getNoDictionaryKeyByIndex(int index) {
+    return this.noDictionaryKeys[index];
+  }
+
+  /**
+   * to get the complex type column data
+   *
+   * @param index of the complex type key
+   * @return complex type key for the index
+   */
+  public byte[] getComplexTypeByIndex(int index) {
+    return this.complexTypesKeys[index];
+  }
+
+  /**
+   * to generate the hash code
+   */
+  @Override public int hashCode() {
+    // first generate the hash code of the dictionary column
+    int len = dictionaryKey.length;
+    int result = 1;
+    for (int j = 0; j < len; j++) {
+      result = 31 * result + dictionaryKey[j];
+    }
+    // then no dictionary column
+    for (byte[] directSurrogateValue : noDictionaryKeys) {
+      for (int i = 0; i < directSurrogateValue.length; i++) {
+        result = 31 * result + directSurrogateValue[i];
+      }
+    }
+    // then for complex type
+    for (byte[] complexTypeKey : complexTypesKeys) {
+      for (int i = 0; i < complexTypeKey.length; i++) {
+        result = 31 * result + complexTypeKey[i];
+      }
+    }
+    return result;
+  }
+
+  /**
+   * to compare two objects for equality
+   *
+   * @param other object
+   */
+  @Override public boolean equals(Object other) {
+    if (!(other instanceof ByteArrayWrapper)) {
+      return false;
+    }
+    boolean result = false;
+    // Comparison will be as follows
+    // first compare the no dictionary column
+    // if it is not equal then return false
+    // if it is equal then compare the complex column
+    // if it is also equal then compare dictionary column
+    byte[][] noDictionaryKeysOther = ((ByteArrayWrapper) other).noDictionaryKeys;
+    if (noDictionaryKeysOther.length != noDictionaryKeys.length) {
+      return false;
+    } else {
+      for (int i = 0; i < noDictionaryKeys.length; i++) {
+        result = UnsafeComparer.INSTANCE.equals(noDictionaryKeys[i], noDictionaryKeysOther[i]);
+        if (!result) {
+          return false;
+        }
+      }
+    }
+
+    byte[][] complexTypesKeysOther = ((ByteArrayWrapper) other).complexTypesKeys;
+    if (complexTypesKeysOther.length != complexTypesKeys.length) {
+      return false;
+    } else {
+      for (int i = 0; i < complexTypesKeys.length; i++) {
+        result = UnsafeComparer.INSTANCE.equals(complexTypesKeys[i], complexTypesKeysOther[i]);
+        if (!result) {
+          return false;
+        }
+      }
+    }
+
+    return UnsafeComparer.INSTANCE.equals(dictionaryKey, ((ByteArrayWrapper) other).dictionaryKey);
+  }
+
+  /**
+   * Compare method for the ByteArrayWrapper class; it compares two
+   * ByteArrayWrapper objects byte array by byte array
+   *
+   * @param other ByteArrayWrapper object
+   */
+  @Override public int compareTo(ByteArrayWrapper other) {
+    // comparison will be as follows:
+    // first compare the dictionary column,
+    // then the no dictionary columns,
+    // then the complex type column data
+    int compareTo = UnsafeComparer.INSTANCE.compareTo(dictionaryKey, other.dictionaryKey);
+    if (compareTo == 0) {
+      for (int i = 0; i < noDictionaryKeys.length; i++) {
+        compareTo =
+            UnsafeComparer.INSTANCE.compareTo(noDictionaryKeys[i], other.noDictionaryKeys[i]);
+        if (compareTo != 0) {
+          return compareTo;
+        }
+      }
+    }
+    if (compareTo == 0) {
+      for (int i = 0; i < complexTypesKeys.length; i++) {
+        compareTo =
+            UnsafeComparer.INSTANCE.compareTo(complexTypesKeys[i], other.complexTypesKeys[i]);
+        if (compareTo != 0) {
+          return compareTo;
+        }
+      }
+    }
+    return compareTo;
+  }
+
+  /**
+   * @return the complexTypesKeys
+   */
+  public byte[][] getComplexTypesKeys() {
+    return complexTypesKeys;
+  }
+
+  /**
+   * @param complexTypesKeys the complexTypesKeys to set
+   */
+  public void setComplexTypesKeys(byte[][] complexTypesKeys) {
+    this.complexTypesKeys = complexTypesKeys;
+  }
+}
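
The hand-rolled 31-based hash and element-wise equality above have the same semantics as the java.util.Arrays utilities. A sketch of an equivalent wrapper around a single byte array (illustrative only; the class above must also cover the no-dictionary and complex-type arrays):

    import java.util.Arrays;

    public final class SimpleByteKey implements Comparable<SimpleByteKey> {
      private final byte[] bytes;

      public SimpleByteKey(byte[] bytes) {
        this.bytes = bytes;
      }

      @Override public int hashCode() {
        return Arrays.hashCode(bytes); // same 31-based rolling hash as the manual loop
      }

      @Override public boolean equals(Object other) {
        return other instanceof SimpleByteKey
            && Arrays.equals(bytes, ((SimpleByteKey) other).bytes);
      }

      @Override public int compareTo(SimpleByteKey other) {
        // Unsigned lexicographic comparison, analogous to UnsafeComparer.compareTo.
        int minLen = Math.min(bytes.length, other.bytes.length);
        for (int i = 0; i < minLen; i++) {
          int cmp = (bytes[i] & 0xFF) - (other.bytes[i] & 0xFF);
          if (cmp != 0) {
            return cmp;
          }
        }
        return bytes.length - other.bytes.length;
      }
    }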

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/common/ext/ColumnUniqueIdGenerator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/common/ext/ColumnUniqueIdGenerator.java b/core/src/main/java/org/carbondata/common/ext/ColumnUniqueIdGenerator.java
deleted file mode 100644
index bc96302..0000000
--- a/core/src/main/java/org/carbondata/common/ext/ColumnUniqueIdGenerator.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.common.ext;
-
-import java.util.UUID;
-
-import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
-import org.carbondata.core.service.ColumnUniqueIdService;
-
-/**
- * It returns unique id given column
- */
-public class ColumnUniqueIdGenerator implements ColumnUniqueIdService {
-
-  private static ColumnUniqueIdService columnUniqueIdService = new ColumnUniqueIdGenerator();
-
-  @Override public String generateUniqueId(String databaseName, ColumnSchema columnSchema) {
-    return UUID.randomUUID().toString();
-  }
-
-  public static ColumnUniqueIdService getInstance() {
-    return columnUniqueIdService;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/common/ext/DictionaryFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/common/ext/DictionaryFactory.java b/core/src/main/java/org/carbondata/common/ext/DictionaryFactory.java
deleted file mode 100644
index 944f772..0000000
--- a/core/src/main/java/org/carbondata/common/ext/DictionaryFactory.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.common.ext;
-
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.reader.CarbonDictionaryMetadataReader;
-import org.carbondata.core.reader.CarbonDictionaryMetadataReaderImpl;
-import org.carbondata.core.reader.CarbonDictionaryReader;
-import org.carbondata.core.reader.CarbonDictionaryReaderImpl;
-import org.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReader;
-import org.carbondata.core.reader.sortindex.CarbonDictionarySortIndexReaderImpl;
-import org.carbondata.core.service.DictionaryService;
-import org.carbondata.core.writer.CarbonDictionaryWriter;
-import org.carbondata.core.writer.CarbonDictionaryWriterImpl;
-import org.carbondata.core.writer.sortindex.CarbonDictionarySortIndexWriter;
-import org.carbondata.core.writer.sortindex.CarbonDictionarySortIndexWriterImpl;
-
-/**
- * service to get dictionary reader and writer
- */
-public class DictionaryFactory implements DictionaryService {
-
-  private static DictionaryService dictService = new DictionaryFactory();
-
-  /**
-   * get dictionary writer
-   *
-   * @param carbonTableIdentifier
-   * @param columnIdentifier
-   * @param carbonStorePath
-   * @return
-   */
-  @Override public CarbonDictionaryWriter getDictionaryWriter(
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
-      String carbonStorePath) {
-    return new CarbonDictionaryWriterImpl(carbonStorePath, carbonTableIdentifier, columnIdentifier);
-  }
-
-  /**
-   * get dictionary sort index writer
-   *
-   * @param carbonTableIdentifier
-   * @param columnIdentifier
-   * @param carbonStorePath
-   * @return
-   */
-  @Override public CarbonDictionarySortIndexWriter getDictionarySortIndexWriter(
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
-      String carbonStorePath) {
-    return new CarbonDictionarySortIndexWriterImpl(carbonTableIdentifier, columnIdentifier,
-        carbonStorePath);
-  }
-
-  /**
-   * get dictionary metadata reader
-   *
-   * @param carbonTableIdentifier
-   * @param columnIdentifier
-   * @param carbonStorePath
-   * @return
-   */
-  @Override public CarbonDictionaryMetadataReader getDictionaryMetadataReader(
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
-      String carbonStorePath) {
-    return new CarbonDictionaryMetadataReaderImpl(carbonStorePath, carbonTableIdentifier,
-        columnIdentifier);
-  }
-
-  /**
-   * get dictionary reader
-   *
-   * @param carbonTableIdentifier
-   * @param columnIdentifier
-   * @param carbonStorePath
-   * @return
-   */
-  @Override public CarbonDictionaryReader getDictionaryReader(
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
-      String carbonStorePath) {
-    return new CarbonDictionaryReaderImpl(carbonStorePath, carbonTableIdentifier, columnIdentifier);
-  }
-
-  /**
-   * get dictionary sort index reader
-   *
-   * @param carbonTableIdentifier
-   * @param columnIdentifier
-   * @param carbonStorePath
-   * @return
-   */
-  @Override public CarbonDictionarySortIndexReader getDictionarySortIndexReader(
-      CarbonTableIdentifier carbonTableIdentifier, ColumnIdentifier columnIdentifier,
-      String carbonStorePath) {
-    return new CarbonDictionarySortIndexReaderImpl(carbonTableIdentifier, columnIdentifier,
-        carbonStorePath);
-  }
-
-  public static DictionaryService getInstance() {
-    return dictService;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/common/ext/PathFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/common/ext/PathFactory.java b/core/src/main/java/org/carbondata/common/ext/PathFactory.java
deleted file mode 100644
index 8b64aec..0000000
--- a/core/src/main/java/org/carbondata/common/ext/PathFactory.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.common.ext;
-
-import org.carbondata.core.carbon.CarbonTableIdentifier;
-import org.carbondata.core.carbon.ColumnIdentifier;
-import org.carbondata.core.carbon.path.CarbonStorePath;
-import org.carbondata.core.carbon.path.CarbonTablePath;
-import org.carbondata.core.service.PathService;
-
-/**
- * Create helper to get path details
- */
-public class PathFactory implements PathService {
-
-  private static PathService pathService = new PathFactory();
-
-  /**
-   * @param columnIdentifier
-   * @param storeLocation
-   * @param tableIdentifier
-   * @return store path related to tables
-   */
-  @Override public CarbonTablePath getCarbonTablePath(ColumnIdentifier columnIdentifier,
-      String storeLocation, CarbonTableIdentifier tableIdentifier) {
-    return CarbonStorePath.getCarbonTablePath(storeLocation, tableIdentifier);
-  }
-
-  public static PathService getInstance() {
-    return pathService;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/common/factory/CarbonCommonFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/common/factory/CarbonCommonFactory.java b/core/src/main/java/org/carbondata/common/factory/CarbonCommonFactory.java
deleted file mode 100644
index dfa14f9..0000000
--- a/core/src/main/java/org/carbondata/common/factory/CarbonCommonFactory.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.common.factory;
-
-import org.carbondata.common.ext.ColumnUniqueIdGenerator;
-import org.carbondata.common.ext.DictionaryFactory;
-import org.carbondata.common.ext.PathFactory;
-import org.carbondata.core.service.ColumnUniqueIdService;
-import org.carbondata.core.service.DictionaryService;
-import org.carbondata.core.service.PathService;
-
-/**
- * Interface to get services
- */
-public class CarbonCommonFactory {
-
-  /**
-   * @return dictionary service
-   */
-  public static DictionaryService getDictionaryService() {
-    return DictionaryFactory.getInstance();
-  }
-
-  /**
-   * @return path service
-   */
-  public static PathService getPathService() {
-    return PathFactory.getInstance();
-  }
-
-  /**
-   * @return unique id generator
-   */
-  public static ColumnUniqueIdService getColumnUniqueIdGenerator() {
-    return ColumnUniqueIdGenerator.getInstance();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/Cache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/Cache.java b/core/src/main/java/org/carbondata/core/cache/Cache.java
deleted file mode 100644
index d2985bd..0000000
--- a/core/src/main/java/org/carbondata/core/cache/Cache.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache;
-
-import java.util.List;
-
-import org.carbondata.core.util.CarbonUtilException;
-
-/**
- * A semi-persistent mapping from keys to values. Cache entries are manually added using
- * #get(Key), #getAll(List<Keys>) , and are stored in the cache until
- * either evicted or manually invalidated.
- * Implementations of this interface are expected to be thread-safe, and can be safely accessed
- * by multiple concurrent threads.
- */
-public interface Cache<K, V> {
-
-  /**
-   * This method will get the value for the given key. If value does not exist
-   * for the given key, it will check and load the value.
-   *
-   * @param key
-   * @return
-   * @throws CarbonUtilException in case memory is not sufficient to load data into memory
-   */
-  V get(K key) throws CarbonUtilException;
-
-  /**
-   * This method will return a list of values for the given list of keys.
-   * For each key, this method will check and load the data if required.
-   *
-   * @param keys
-   * @return
-   * @throws CarbonUtilException in case memory is not sufficient to load data into memory
-   */
-  List<V> getAll(List<K> keys) throws CarbonUtilException;
-
-  /**
-   * This method will return the value for the given key. It will not check and load
-   * the data for the given key
-   *
-   * @param key
-   * @return
-   */
-  V getIfPresent(K key);
-
-  /**
-   * This method will remove the cache for a given key
-   *
-   * @param key
-   */
-  void invalidate(K key);
-}
-
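
The Cache interface removed here (it moves to org.apache.carbondata in this rename) is small enough to illustrate with a toy implementation: a thread-safe map plus a loader function, without the eviction that the real LRU-backed caches provide (a minimal sketch, not CarbonData code):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    public class MapBackedCache<K, V> {
      private final ConcurrentHashMap<K, V> store = new ConcurrentHashMap<>();
      private final Function<K, V> loader;

      public MapBackedCache(Function<K, V> loader) {
        this.loader = loader;
      }

      // Load-on-miss, like Cache.get(key). Assumes the loader never returns null.
      public V get(K key) {
        return store.computeIfAbsent(key, loader);
      }

      // Bulk form, like Cache.getAll(keys).
      public List<V> getAll(List<K> keys) {
        List<V> values = new ArrayList<>(keys.size());
        for (K key : keys) {
          values.add(get(key));
        }
        return values;
      }

      // Lookup without loading, like Cache.getIfPresent(key).
      public V getIfPresent(K key) {
        return store.get(key);
      }

      // Drop an entry, like Cache.invalidate(key).
      public void invalidate(K key) {
        store.remove(key);
      }
    }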

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/CacheProvider.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/CacheProvider.java b/core/src/main/java/org/carbondata/core/cache/CacheProvider.java
deleted file mode 100644
index ad9857a..0000000
--- a/core/src/main/java/org/carbondata/core/cache/CacheProvider.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.carbondata.core.cache.dictionary.Dictionary;
-import org.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
-import org.carbondata.core.cache.dictionary.ForwardDictionaryCache;
-import org.carbondata.core.cache.dictionary.ReverseDictionaryCache;
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-/**
- * Cache provider class which will create a cache based on given type
- */
-public class CacheProvider {
-
-  /**
-   * cache provider instance
-   */
-  private static CacheProvider cacheProvider = new CacheProvider();
-
-  /**
-   * a map that will hold the entry for cache type to cache object mapping
-   */
-  private Map<CacheType, Cache> cacheTypeToCacheMap =
-      new HashMap<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-
-  /**
-   * a map that will hold the mapping of cache type to LRU cache instance
-   */
-  private Map<CacheType, CarbonLRUCache> cacheTypeToLRUCacheMap =
-      new HashMap<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-
-  /**
-   * object lock instance to be used in synchronization block
-   */
-  private final Object lock = new Object();
-
-  /**
-   * private constructor to follow singleton design pattern for this class
-   */
-  private CacheProvider() {
-
-  }
-
-  /**
-   * @return cache provider instance
-   */
-  public static CacheProvider getInstance() {
-    return cacheProvider;
-  }
-
-  /**
-   * This method will check if a cache already exists for the given cache type and
-   * create one in case it is not present in the map
-   *
-   * @param cacheType       type of cache
-   * @param carbonStorePath store path
-   * @param <K>
-   * @param <V>
-   * @return
-   */
-  public <K, V> Cache<K, V> createCache(CacheType cacheType, String carbonStorePath) {
-    //check if lru cache is null, if null create one
-    //check if cache is null for given cache type, if null create one
-    if (!dictionaryCacheAlreadyExists(cacheType)) {
-      synchronized (lock) {
-        if (!dictionaryCacheAlreadyExists(cacheType)) {
-          if (null == cacheTypeToLRUCacheMap.get(cacheType)) {
-            createLRULevelCacheInstance(cacheType);
-          }
-          createDictionaryCacheForGivenType(cacheType, carbonStorePath);
-        }
-      }
-    }
-    return cacheTypeToCacheMap.get(cacheType);
-  }
-
-  /**
-   * This method will create the cache for given cache type
-   *
-   * @param cacheType       type of cache
-   * @param carbonStorePath store path
-   */
-  private void createDictionaryCacheForGivenType(CacheType cacheType, String carbonStorePath) {
-    Cache cacheObject = null;
-    if (cacheType.equals(CacheType.REVERSE_DICTIONARY)) {
-      cacheObject =
-          new ReverseDictionaryCache<DictionaryColumnUniqueIdentifier, Dictionary>(carbonStorePath,
-              cacheTypeToLRUCacheMap.get(cacheType));
-    } else if (cacheType.equals(CacheType.FORWARD_DICTIONARY)) {
-      cacheObject =
-          new ForwardDictionaryCache<DictionaryColumnUniqueIdentifier, Dictionary>(carbonStorePath,
-              cacheTypeToLRUCacheMap.get(cacheType));
-    }
-    cacheTypeToCacheMap.put(cacheType, cacheObject);
-  }
-
-  /**
-   * This method will create the lru cache instance based on the given type
-   *
-   * @param cacheType
-   */
-  private void createLRULevelCacheInstance(CacheType cacheType) {
-    CarbonLRUCache carbonLRUCache = null;
-    // if cache type is dictionary cache, then same lru cache instance has to be shared
-    // between forward and reverse cache
-    if (cacheType.equals(CacheType.REVERSE_DICTIONARY) || cacheType
-        .equals(CacheType.FORWARD_DICTIONARY)) {
-      carbonLRUCache = new CarbonLRUCache(CarbonCommonConstants.CARBON_MAX_LEVEL_CACHE_SIZE,
-          CarbonCommonConstants.CARBON_MAX_LEVEL_CACHE_SIZE_DEFAULT);
-      cacheTypeToLRUCacheMap.put(CacheType.REVERSE_DICTIONARY, carbonLRUCache);
-      cacheTypeToLRUCacheMap.put(CacheType.FORWARD_DICTIONARY, carbonLRUCache);
-    }
-  }
-
-  /**
-   * This method will check whether the map already has an entry for
-   * given cache type
-   *
-   * @param cacheType
-   * @return
-   */
-  private boolean dictionaryCacheAlreadyExists(CacheType cacheType) {
-    return null != cacheTypeToCacheMap.get(cacheType);
-  }
-
-  /**
-   * Below method will be used to clear the cache
-   */
-  public void dropAllCache() {
-    cacheTypeToLRUCacheMap.clear();
-    cacheTypeToCacheMap.clear();
-  }
-}

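createCache above is the classic get-or-create, double-checked-locking shape: an unsynchronized fast path, then a re-check inside the lock. A self-contained sketch of the same pattern with generic, illustrative names (not CarbonData API); a ConcurrentHashMap makes the lock-free first read safe, where the original relies on a plain HashMap.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public final class LazyRegistry {
      private static final LazyRegistry INSTANCE = new LazyRegistry();

      // ConcurrentHashMap so the unsynchronized first check is safe across threads.
      private final Map<String, Object> registry = new ConcurrentHashMap<>();
      private final Object lock = new Object();

      private LazyRegistry() {
      }

      public static LazyRegistry getInstance() {
        return INSTANCE;
      }

      public Object getOrCreate(String key) {
        Object value = registry.get(key); // fast path once the entry exists
        if (value == null) {
          synchronized (lock) {
            value = registry.get(key); // re-check: another thread may have won the race
            if (value == null) {
              value = new Object(); // stand-in for the expensive cache construction
              registry.put(key, value);
            }
          }
        }
        return value;
      }
    }
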
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/CacheType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/CacheType.java b/core/src/main/java/org/carbondata/core/cache/CacheType.java
deleted file mode 100644
index d07daf8..0000000
--- a/core/src/main/java/org/carbondata/core/cache/CacheType.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache;
-
-import org.carbondata.core.cache.dictionary.Dictionary;
-import org.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
-
-/**
- * class which defines different cache types. cache type can be dictionary cache for
- * forward (surrogate key to byte array mapping) and reverse (byte array to
- * surrogate mapping) dictionary or a B-tree cache
- */
-public class CacheType<K, V> {
-
-  /**
-   * Forward dictionary cache which maintains surrogate key to byte array mapping
-   */
-  public static final CacheType<DictionaryColumnUniqueIdentifier, Dictionary> FORWARD_DICTIONARY =
-      new CacheType("forward_dictionary");
-
-  /**
-   * Reverse dictionary cache which maintains byte array to surrogate key mapping
-   */
-  public static final CacheType<DictionaryColumnUniqueIdentifier, Dictionary> REVERSE_DICTIONARY =
-      new CacheType("reverse_dictionary");
-
-  /**
-   * cacheName which is unique name for a cache
-   */
-  private String cacheName;
-
-  /**
-   * @param cacheName
-   */
-  private CacheType(String cacheName) {
-    this.cacheName = cacheName;
-  }
-
-  /**
-   * @return cache unique name
-   */
-  public String getCacheName() {
-    return cacheName;
-  }
-}

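CacheType uses the typesafe-constant idiom rather than a Java enum because enum constants cannot carry per-constant generic parameters, and here each constant fixes the key and value types of the cache it names. A standalone sketch of the idiom (names illustrative only):

    public final class TypedKey<T> {
      // Each constant pins down T, so lookups keyed on it need no casts at the call site.
      public static final TypedKey<String> NAME = new TypedKey<>("name");
      public static final TypedKey<Integer> SIZE = new TypedKey<>("size");

      private final String label;

      private TypedKey(String label) {
        this.label = label;
      }

      public String getLabel() {
        return label;
      }
    }
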
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/Cacheable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/Cacheable.java b/core/src/main/java/org/carbondata/core/cache/Cacheable.java
deleted file mode 100644
index e0cc390..0000000
--- a/core/src/main/java/org/carbondata/core/cache/Cacheable.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache;
-
-/**
- * interface which declares methods which will decide whether to keep
- * cacheable objects in memory
- */
-public interface Cacheable {
-
-  /**
-   * This method will return the timestamp of the file, based on which the
-   * decision will be taken whether to read that file or not
-   *
-   * @return
-   */
-  long getFileTimeStamp();
-
-  /**
-   * This method will return the access count for a column based on which decision will be taken
-   * whether to keep the object in memory
-   *
-   * @return
-   */
-  int getAccessCount();
-
-  /**
-   * This method will return the memory size of a column
-   *
-   * @return
-   */
-  long getMemorySize();
-}

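A minimal Cacheable implementation sketch (a hypothetical class, not part of this commit), following the AtomicInteger acquire/release discipline that AbstractColumnDictionaryInfo, later in this diff, applies to its access count:

    import java.util.concurrent.atomic.AtomicInteger;

    public class SimpleCacheEntry implements org.carbondata.core.cache.Cacheable {
      private final AtomicInteger accessCount = new AtomicInteger();
      private final long fileTimeStamp;
      private final long memorySize;

      public SimpleCacheEntry(long fileTimeStamp, long memorySize) {
        this.fileTimeStamp = fileTimeStamp;
        this.memorySize = memorySize;
      }

      @Override public long getFileTimeStamp() {
        return fileTimeStamp;
      }

      @Override public int getAccessCount() {
        return accessCount.get(); // a non-zero count blocks LRU eviction
      }

      @Override public long getMemorySize() {
        return memorySize; // feeds the size accounting in CarbonLRUCache
      }

      // Callers hold the entry between acquire() and release() so it cannot be evicted.
      public void acquire() {
        accessCount.incrementAndGet();
      }

      public void release() {
        accessCount.decrementAndGet();
      }
    }
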
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/CarbonLRUCache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/CarbonLRUCache.java b/core/src/main/java/org/carbondata/core/cache/CarbonLRUCache.java
deleted file mode 100644
index 72ee209..0000000
--- a/core/src/main/java/org/carbondata/core/cache/CarbonLRUCache.java
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache;
-
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.constants.CarbonCommonConstants;
-import org.carbondata.core.util.CarbonProperties;
-
-/**
- * class which manages the lru cache
- */
-public final class CarbonLRUCache {
-  /**
-   * constant for converting MB into bytes
-   */
-  private static final int BYTE_CONVERSION_CONSTANT = 1024 * 1024;
-  /**
-   * Attribute for Carbon LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(CarbonLRUCache.class.getName());
-  /**
-   * Map that will contain key as table unique name and value as cache Holder
-   * object
-   */
-  private Map<String, Cacheable> lruCacheMap;
-  /**
-   * lruCacheSize
-   */
-  private long lruCacheMemorySize;
-  /**
-   * total size of the cache
-   */
-  private long currentSize;
-
-  /**
-   * @param propertyName        property name to take the size configured
-   * @param defaultPropertyName default property in case size is not configured
-   */
-  public CarbonLRUCache(String propertyName, String defaultPropertyName) {
-    try {
-      lruCacheMemorySize = Integer
-          .parseInt(CarbonProperties.getInstance().getProperty(propertyName, defaultPropertyName));
-    } catch (NumberFormatException e) {
-      lruCacheMemorySize = Integer.parseInt(defaultPropertyName);
-    }
-    initCache();
-    if (lruCacheMemorySize > 0) {
-      LOGGER.info("Configured level cahce size is " + lruCacheMemorySize + " MB");
-      // convert in bytes
-      lruCacheMemorySize = lruCacheMemorySize * BYTE_CONVERSION_CONSTANT;
-    } else {
-      LOGGER.info("Column cache size not configured. Therefore default behavior will be "
-              + "considered and no LRU based eviction of columns will be done");
-    }
-  }
-
-  /**
-   * initialize lru cache
-   */
-  private void initCache() {
-    lruCacheMap =
-        new LinkedHashMap<String, Cacheable>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE, 1.0f,
-            true);
-  }
-
-  /**
-   * This method will give the list of all the keys that can be deleted from
-   * the level LRU cache
-   */
-  private List<String> getKeysToBeRemoved(long size) {
-    List<String> toBeDeletedKeys =
-        new ArrayList<String>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    long removedSize = 0;
-    for (Entry<String, Cacheable> entry : lruCacheMap.entrySet()) {
-      String key = entry.getKey();
-      Cacheable cacheInfo = entry.getValue();
-      long memorySize = cacheInfo.getMemorySize();
-      if (canBeRemoved(cacheInfo)) {
-        removedSize = removedSize + memorySize;
-        toBeDeletedKeys.add(key);
-        // check if, after removing the current entry's size, the
-        // required size added to the current size fits within the
-        // cache memory limit
-        if (lruCacheMemorySize >= (currentSize - memorySize + size)) {
-          toBeDeletedKeys.clear();
-          toBeDeletedKeys.add(key);
-          removedSize = memorySize;
-          break;
-        }
-        // check if, after removing the accumulated removed size, the
-        // required size added to the current size fits within the
-        // cache memory limit
-        else if (lruCacheMemorySize >= (currentSize - removedSize + size)) {
-          break;
-        }
-      }
-    }
-    // this case arises when iteration over the keys is complete but the
-    // size is still insufficient for the level file to be loaded; in
-    // that case none of the keys are deleted
-    if ((currentSize - removedSize + size) > lruCacheMemorySize) {
-      toBeDeletedKeys.clear();
-    }
-    return toBeDeletedKeys;
-  }
-
-  /**
-   * @param cacheInfo
-   * @return
-   */
-  private boolean canBeRemoved(Cacheable cacheInfo) {
-    if (cacheInfo.getAccessCount() > 0) {
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * @param key
-   */
-  public void remove(String key) {
-    synchronized (lruCacheMap) {
-      removeKey(key);
-    }
-  }
-
-  /**
-   * This method will remove the key from lru cache
-   *
-   * @param key
-   */
-  private void removeKey(String key) {
-    Cacheable cacheable = lruCacheMap.get(key);
-    if (null != cacheable) {
-      currentSize = currentSize - cacheable.getMemorySize();
-    }
-    lruCacheMap.remove(key);
-    LOGGER.info("Removed level entry from InMemory level lru cache :: " + key);
-  }
-
-  /**
-   * This method will check if required size is available in the memory and then add
-   * the given cacheable object to the lru cache
-   *
-   * @param columnIdentifier
-   * @param cacheInfo
-   */
-  public boolean put(String columnIdentifier, Cacheable cacheInfo, long requiredSize) {
-    boolean columnKeyAddedSuccessfully = false;
-    if (freeMemorySizeForAddingCache(requiredSize)) {
-      synchronized (lruCacheMap) {
-        currentSize = currentSize + requiredSize;
-        if (null == lruCacheMap.get(columnIdentifier)) {
-          lruCacheMap.put(columnIdentifier, cacheInfo);
-        }
-        columnKeyAddedSuccessfully = true;
-      }
-      LOGGER.debug("Added level entry to InMemory level lru cache :: " + columnIdentifier);
-    } else {
-      LOGGER.error("Size not available. Column cannot be added to level lru cache :: "
-          + columnIdentifier + " .Required Size = " + requiredSize + " Size available "
-          + (lruCacheMemorySize - currentSize));
-    }
-    return columnKeyAddedSuccessfully;
-  }
-
-  /**
-   * This method will check whether a required column can be loaded into memory. If required,
-   * this method will call for eviction of existing data from memory
-   *
-   * @param requiredSize
-   * @return
-   */
-  private boolean freeMemorySizeForAddingCache(long requiredSize) {
-    boolean memoryAvailable = false;
-    if (lruCacheMemorySize > 0) {
-      if (isSizeAvailableToLoadColumnDictionary(requiredSize)) {
-        memoryAvailable = true;
-      } else {
-        synchronized (lruCacheMap) {
-          // get the keys that can be removed from memory
-          List<String> keysToBeRemoved = getKeysToBeRemoved(requiredSize);
-          for (String cacheKey : keysToBeRemoved) {
-            removeKey(cacheKey);
-          }
-          // after removing the keys check again if required size is available
-          if (isSizeAvailableToLoadColumnDictionary(requiredSize)) {
-            memoryAvailable = true;
-          }
-        }
-      }
-    } else {
-      memoryAvailable = true;
-    }
-    return memoryAvailable;
-  }
-
-  /**
-   * This method will check if size is available to load dictionary into memory
-   *
-   * @param requiredSize
-   * @return
-   */
-  private boolean isSizeAvailableToLoadColumnDictionary(long requiredSize) {
-    return lruCacheMemorySize >= (currentSize + requiredSize);
-  }
-
-  /**
-   * @param key
-   * @return
-   */
-  public Cacheable get(String key) {
-    synchronized (lruCacheMap) {
-      return lruCacheMap.get(key);
-    }
-  }
-
-  /**
-   * This method will empty the level cache
-   */
-  public void clear() {
-    synchronized (lruCacheMap) {
-      lruCacheMap.clear();
-    }
-  }
-}

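The eviction loop in getKeysToBeRemoved above reduces to a single inequality: keep collecting evictable keys until lruCacheMemorySize >= currentSize - removedSize + requiredSize. A standalone worked example with made-up numbers:

    public class LruEvictionMath {
      public static void main(String[] args) {
        long capacity = 100; // lruCacheMemorySize (after the MB-to-byte conversion)
        long current = 90;   // currentSize already held in the cache
        long incoming = 30;  // size required for the new entry

        // A 25-byte candidate alone is enough: 90 - 25 + 30 = 95 <= 100,
        // so the loop keeps just that key and stops.
        System.out.println(capacity >= current - 25 + incoming); // true

        // A 5-byte candidate is not: 90 - 5 + 30 = 115 > 100, so the loop
        // keeps scanning and accumulates removedSize across further keys.
        System.out.println(capacity >= current - 5 + incoming); // false
      }
    }
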
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/cd6a4ff3/core/src/main/java/org/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java b/core/src/main/java/org/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java
deleted file mode 100644
index 5e20603..0000000
--- a/core/src/main/java/org/carbondata/core/cache/dictionary/AbstractColumnDictionaryInfo.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.carbondata.core.cache.dictionary;
-
-import java.nio.charset.Charset;
-import java.util.List;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.carbondata.core.constants.CarbonCommonConstants;
-
-/**
- * class that implements cacheable interface and methods specific to column dictionary
- */
-public abstract class AbstractColumnDictionaryInfo implements DictionaryInfo {
-
-  /**
-   * list that will hold all the dictionary chunks for one column
-   */
-  protected List<List<byte[]>> dictionaryChunks = new CopyOnWriteArrayList<>();
-
-  /**
-   * minimum value of a surrogate key; dictionary surrogate keys start from 1
-   */
-  protected static final int MINIMUM_SURROGATE_KEY = 1;
-
-  /**
-   * atomic integer to maintain the access count for a column
-   */
-  protected AtomicInteger accessCount = new AtomicInteger();
-
-  /**
-   * file timestamp
-   */
-  protected long fileTimeStamp;
-
-  /**
-   * offset till where file is read
-   */
-  protected long offsetTillFileIsRead;
-
-  /**
-   * length of dictionary metadata file
-   */
-  private long dictionaryMetaFileLength;
-
-  /**
-   * This method will return the timestamp of the file, based on which the
-   * decision will be taken whether to read that file or not
-   *
-   * @return
-   */
-  @Override public long getFileTimeStamp() {
-    return fileTimeStamp;
-  }
-
-  /**
-   * This method will return the access count for a column based on which decision will be taken
-   * whether to keep the object in memory
-   *
-   * @return
-   */
-  @Override public int getAccessCount() {
-    return accessCount.get();
-  }
-
-  /**
-   * This method will return the memory size of a column
-   *
-   * @return
-   */
-  @Override public long getMemorySize() {
-    return offsetTillFileIsRead;
-  }
-
-  /**
-   * This method will increment the access count for a column by 1
-   * whenever a column is getting used in query or incremental data load
-   */
-  @Override public void incrementAccessCount() {
-    accessCount.incrementAndGet();
-  }
-
-  /**
-   * This method will decrement the access count for a column by 1
-   * whenever a column usage is complete
-   */
-  private void decrementAccessCount() {
-    if (accessCount.get() > 0) {
-      accessCount.decrementAndGet();
-    }
-  }
-
-  /**
-   * This method will update the end offset of the file every time a file is read
-   *
-   * @param offsetTillFileIsRead
-   */
-  @Override public void setOffsetTillFileIsRead(long offsetTillFileIsRead) {
-    this.offsetTillFileIsRead = offsetTillFileIsRead;
-  }
-
-  /**
-   * This method will update the timestamp of a file if a file is modified
-   * like in case of incremental load
-   *
-   * @param fileTimeStamp
-   */
-  @Override public void setFileTimeStamp(long fileTimeStamp) {
-    this.fileTimeStamp = fileTimeStamp;
-  }
-
-  /**
-   * This method returns the list of dictionary chunks of a column.
-   * Application scenario:
-   * preparing the column sort info while writing the sort index file.
-   *
-   * @return
-   */
-  @Override public DictionaryChunksWrapper getDictionaryChunks() {
-    DictionaryChunksWrapper chunksWrapper = new DictionaryChunksWrapper(dictionaryChunks);
-    return chunksWrapper;
-  }
-
-  /**
-   * This method will release the objects and set default value for primitive types
-   */
-  @Override public void clear() {
-    decrementAccessCount();
-  }
-
-  /**
-   * This method will find and return the sort index for a given dictionary id.
-   * Applicable scenarios:
-   * 1. Used in case of order by queries when data sorting is required
-   *
-   * @param surrogateKey a unique ID for a dictionary value
-   * @return if found returns key else 0
-   */
-  @Override public int getSortedIndex(int surrogateKey) {
-    return 0;
-  }
-
-  /**
-   * dictionary metadata file length which will be set whenever we reload dictionary
-   * data from disk
-   *
-   * @param dictionaryMetaFileLength length of dictionary metadata file
-   */
-  @Override public void setDictionaryMetaFileLength(long dictionaryMetaFileLength) {
-    this.dictionaryMetaFileLength = dictionaryMetaFileLength;
-  }
-
-  /**
-   * Dictionary meta file length, which will be read to check whether the length of the
-   * dictionary meta file has been modified
-   *
-   * @return
-   */
-  @Override public long getDictionaryMetaFileLength() {
-    return dictionaryMetaFileLength;
-  }
-
-  /**
-   * This method will find and return the dictionary value from sorted index.
-   * Applicable scenarios:
-   * 1. Query final result preparation in case of order by queries:
-   * While converting the final result, which will be
-   * surrogate keys, back to original dictionary values, this method will be used
-   *
-   * @param sortedIndex sort index of dictionary value
-   * @return value if found else null
-   */
-  @Override public String getDictionaryValueFromSortedIndex(int sortedIndex) {
-    return null;
-  }
-
-  /**
-   * This method will set the sort order index of a dictionary column.
-   * Sort order index is the index of dictionary values after they are sorted.
-   *
-   * @param sortOrderIndex
-   */
-  @Override public void setSortOrderIndex(List<Integer> sortOrderIndex) {
-  }
-
-  /**
-   * This method will set the sort reverse index of a dictionary column.
-   * Sort reverse index is the index of dictionary values before they are sorted.
-   *
-   * @param sortReverseOrderIndex
-   */
-  @Override public void setSortReverseOrderIndex(List<Integer> sortReverseOrderIndex) {
-  }
-
-  /**
-   * This method will find and return the dictionary value for a given surrogate key.
-   * Applicable scenarios:
-   * 1. Query final result preparation: while converting the final result, which will
-   * be surrogate keys, back to original dictionary values, this method will be used
-   *
-   * @param surrogateKey a unique ID for a dictionary value
-   * @return value if found else null
-   */
-  @Override public String getDictionaryValueForKey(int surrogateKey) {
-    String dictionaryValue = null;
-    if (surrogateKey < MINIMUM_SURROGATE_KEY) {
-      return dictionaryValue;
-    }
-    byte[] dictionaryValueInBytes = getDictionaryBytesFromSurrogate(surrogateKey);
-    if (null != dictionaryValueInBytes) {
-      dictionaryValue = new String(dictionaryValueInBytes,
-          Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
-    }
-    return dictionaryValue;
-  }
-
-  /**
-   * This method will find and return the dictionary value as byte array for a
-   * given surrogate key
-   *
-   * @param surrogateKey
-   * @return
-   */
-  protected byte[] getDictionaryBytesFromSurrogate(int surrogateKey) {
-    byte[] dictionaryValueInBytes = null;
-    int totalSizeOfDictionaryChunksTraversed = 0;
-    for (List<byte[]> oneDictionaryChunk : dictionaryChunks) {
-      totalSizeOfDictionaryChunksTraversed =
-          totalSizeOfDictionaryChunksTraversed + oneDictionaryChunk.size();
-      // skip dictionary chunks until the total size traversed reaches
-      // the surrogate key
-      if (totalSizeOfDictionaryChunksTraversed < surrogateKey) {
-        continue;
-      }
-      // lets say surrogateKey = 26, total size traversed is 28, dictionary chunk size = 12
-      // then surrogate position in dictionary chunk list is = 26 - (28-12) - 1 = 9
-      // -1 because list index starts from 0
-      int surrogatePositionInDictionaryChunk =
-          surrogateKey - (totalSizeOfDictionaryChunksTraversed - oneDictionaryChunk.size()) - 1;
-      dictionaryValueInBytes = oneDictionaryChunk.get(surrogatePositionInDictionaryChunk);
-      break;
-    }
-    return dictionaryValueInBytes;
-  }
-
-  /**
-   * This method will find and return the surrogate key for a given dictionary value
-   * Applicable scenario:
-   * 1. Incremental data load: dictionary will not be generated for existing values; those
-   * values have to be looked up in the existing dictionary cache.
-   * 2. Filter scenarios where from value surrogate key has to be found.
-   *
-   * @param value dictionary value
-   * @return if found returns key else 0
-   */
-  @Override public int getSurrogateKey(String value) {
-    byte[] keyData = value.getBytes(Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET));
-    return getSurrogateKey(keyData);
-  }
-}
-
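The chunk-walk arithmetic in getDictionaryBytesFromSurrogate above can be checked in isolation. This sketch reuses the numbers from its inline comment (surrogate key 26, traversed total 28, last chunk of size 12, chunk-local index 9); chunk sizes of 16 and 12 are assumed so the totals match.

    import java.util.Arrays;
    import java.util.List;

    public class ChunkPositionMath {
      public static void main(String[] args) {
        List<Integer> chunkSizes = Arrays.asList(16, 12); // 16 + 12 = 28 traversed in total
        int surrogateKey = 26;
        int traversed = 0;
        for (int chunkSize : chunkSizes) {
          traversed += chunkSize;
          if (traversed < surrogateKey) {
            continue; // the surrogate lies in a later chunk
          }
          // 26 - (28 - 12) - 1 = 9; the -1 converts the 1-based key to a 0-based index
          int position = surrogateKey - (traversed - chunkSize) - 1;
          System.out.println("chunk-local index = " + position); // prints 9
          break;
        }
      }
    }
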