Posted to commits@carbondata.apache.org by jb...@apache.org on 2016/06/23 14:16:20 UTC
[32/56] [abbrv] incubator-carbondata git commit: Refactor
org.carbondata.query package (#692)
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/1c725f5b/core/src/main/java/org/carbondata/query/util/DataFileFooterConverter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/query/util/DataFileFooterConverter.java b/core/src/main/java/org/carbondata/query/util/DataFileFooterConverter.java
deleted file mode 100644
index d79ea5d..0000000
--- a/core/src/main/java/org/carbondata/query/util/DataFileFooterConverter.java
+++ /dev/null
@@ -1,418 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.carbondata.query.util;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Iterator;
-import java.util.List;
-
-import org.carbondata.common.logging.LogService;
-import org.carbondata.common.logging.LogServiceFactory;
-import org.carbondata.core.carbon.metadata.blocklet.BlockletInfo;
-import org.carbondata.core.carbon.metadata.blocklet.DataFileFooter;
-import org.carbondata.core.carbon.metadata.blocklet.SegmentInfo;
-import org.carbondata.core.carbon.metadata.blocklet.compressor.ChunkCompressorMeta;
-import org.carbondata.core.carbon.metadata.blocklet.compressor.CompressionCodec;
-import org.carbondata.core.carbon.metadata.blocklet.datachunk.DataChunk;
-import org.carbondata.core.carbon.metadata.blocklet.datachunk.PresenceMeta;
-import org.carbondata.core.carbon.metadata.blocklet.index.BlockletBTreeIndex;
-import org.carbondata.core.carbon.metadata.blocklet.index.BlockletIndex;
-import org.carbondata.core.carbon.metadata.blocklet.index.BlockletMinMaxIndex;
-import org.carbondata.core.carbon.metadata.blocklet.sort.SortState;
-import org.carbondata.core.carbon.metadata.datatype.DataType;
-import org.carbondata.core.carbon.metadata.encoder.Encoding;
-import org.carbondata.core.carbon.metadata.schema.table.column.ColumnSchema;
-import org.carbondata.core.datastorage.store.FileHolder;
-import org.carbondata.core.datastorage.store.impl.FileFactory;
-import org.carbondata.core.metadata.ValueEncoderMeta;
-import org.carbondata.core.reader.CarbonFooterReader;
-import org.carbondata.core.util.ByteUtil;
-import org.carbondata.core.util.CarbonUtil;
-import org.carbondata.format.FileFooter;
-
-/**
- * Below class will be used to convert the thrift object of data file
- * meta data to wrapper object
- */
-public class DataFileFooterConverter {
-
- private static final LogService LOGGER =
- LogServiceFactory.getLogService(DataFileFooterConverter.class.getName());
-
- /**
- * Below method will be used to convert thrift file meta to wrapper file meta
- */
- public DataFileFooter readDataFileFooter(String filePath, long blockOffset, long blockLength)
- throws IOException {
- DataFileFooter dataFileFooter = new DataFileFooter();
- FileHolder fileReader = null;
- try {
- long completeBlockLength = blockOffset + blockLength;
- long footerPointer = completeBlockLength - 8;
- fileReader = FileFactory.getFileHolder(FileFactory.getFileType(filePath));
- long actualFooterOffset = fileReader.readLong(filePath, footerPointer);
- CarbonFooterReader reader = new CarbonFooterReader(filePath, actualFooterOffset);
- FileFooter footer = reader.readFooter();
- dataFileFooter.setVersionId(footer.getVersion());
- dataFileFooter.setNumberOfRows(footer.getNum_rows());
- dataFileFooter.setSegmentInfo(getSegmentInfo(footer.getSegment_info()));
- List<ColumnSchema> columnSchemaList = new ArrayList<ColumnSchema>();
- List<org.carbondata.format.ColumnSchema> table_columns = footer.getTable_columns();
- for (int i = 0; i < table_columns.size(); i++) {
- columnSchemaList.add(thriftColumnSchmeaToWrapperColumnSchema(table_columns.get(i)));
- }
- dataFileFooter.setColumnInTable(columnSchemaList);
-
- List<org.carbondata.format.BlockletIndex> leaf_node_indices_Thrift =
- footer.getBlocklet_index_list();
- List<BlockletIndex> blockletIndexList = new ArrayList<BlockletIndex>();
- for (int i = 0; i < leaf_node_indices_Thrift.size(); i++) {
- BlockletIndex blockletIndex = getBlockletIndex(leaf_node_indices_Thrift.get(i));
- blockletIndexList.add(blockletIndex);
- }
-
- List<org.carbondata.format.BlockletInfo> leaf_node_infos_Thrift =
- footer.getBlocklet_info_list();
- List<BlockletInfo> blockletInfoList = new ArrayList<BlockletInfo>();
- for (int i = 0; i < leaf_node_infos_Thrift.size(); i++) {
- BlockletInfo blockletInfo = getBlockletInfo(leaf_node_infos_Thrift.get(i));
- blockletInfo.setBlockletIndex(blockletIndexList.get(i));
- blockletInfoList.add(blockletInfo);
- }
- dataFileFooter.setBlockletList(blockletInfoList);
- dataFileFooter.setBlockletIndex(getBlockletIndexForDataFileFooter(blockletIndexList));
- } finally {
- if (null != fileReader) {
- fileReader.finish();
- }
- }
- return dataFileFooter;
- }
-
- /**
- * Below method will be used to get blocklet index for data file meta
- *
- * @param blockletIndexList
- * @return blocklet index
- */
- private BlockletIndex getBlockletIndexForDataFileFooter(List<BlockletIndex> blockletIndexList) {
- BlockletIndex blockletIndex = new BlockletIndex();
- BlockletBTreeIndex blockletBTreeIndex = new BlockletBTreeIndex();
- blockletBTreeIndex.setStartKey(blockletIndexList.get(0).getBtreeIndex().getStartKey());
- blockletBTreeIndex
- .setEndKey(blockletIndexList.get(blockletIndexList.size() - 1).getBtreeIndex().getEndKey());
- blockletIndex.setBtreeIndex(blockletBTreeIndex);
- byte[][] currentMinValue = blockletIndexList.get(0).getMinMaxIndex().getMinValues().clone();
- byte[][] currentMaxValue = blockletIndexList.get(0).getMinMaxIndex().getMaxValues().clone();
- byte[][] minValue = null;
- byte[][] maxValue = null;
- for (int i = 1; i < blockletIndexList.size(); i++) {
- minValue = blockletIndexList.get(i).getMinMaxIndex().getMinValues();
- maxValue = blockletIndexList.get(i).getMinMaxIndex().getMaxValues();
- for (int j = 0; j < maxValue.length; j++) {
- if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(currentMinValue[j], minValue[j]) > 0) {
- currentMinValue[j] = minValue[j].clone();
- }
- if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(currentMaxValue[j], maxValue[j]) < 0) {
- currentMaxValue[j] = maxValue[j].clone();
- }
- }
- }
-
- BlockletMinMaxIndex minMax = new BlockletMinMaxIndex();
- minMax.setMaxValues(currentMaxValue);
- minMax.setMinValues(currentMinValue);
- blockletIndex.setMinMaxIndex(minMax);
- return blockletIndex;
- }
-
- private ColumnSchema thriftColumnSchmeaToWrapperColumnSchema(
- org.carbondata.format.ColumnSchema externalColumnSchema) {
- ColumnSchema wrapperColumnSchema = new ColumnSchema();
- wrapperColumnSchema.setColumnUniqueId(externalColumnSchema.getColumn_id());
- wrapperColumnSchema.setColumnName(externalColumnSchema.getColumn_name());
- wrapperColumnSchema.setColumnar(externalColumnSchema.isColumnar());
- wrapperColumnSchema
- .setDataType(thriftDataTyopeToWrapperDataType(externalColumnSchema.data_type));
- wrapperColumnSchema.setDimensionColumn(externalColumnSchema.isDimension());
- List<Encoding> encoders = new ArrayList<Encoding>();
- for (org.carbondata.format.Encoding encoder : externalColumnSchema.getEncoders()) {
- encoders.add(fromExternalToWrapperEncoding(encoder));
- }
- wrapperColumnSchema.setEncodingList(encoders);
- wrapperColumnSchema.setNumberOfChild(externalColumnSchema.getNum_child());
- wrapperColumnSchema.setPrecision(externalColumnSchema.getPrecision());
- wrapperColumnSchema.setColumnGroup(externalColumnSchema.getColumn_group_id());
- wrapperColumnSchema.setScale(externalColumnSchema.getScale());
- wrapperColumnSchema.setDefaultValue(externalColumnSchema.getDefault_value());
- wrapperColumnSchema.setAggregateFunction(externalColumnSchema.getAggregate_function());
- return wrapperColumnSchema;
- }
-
- /**
- * Below method is to convert the blocklet info of the thrift to wrapper
- * blocklet info
- *
- * @param blockletInfoThrift blocklet info of the thrift
- * @return blocklet info wrapper
- */
- private BlockletInfo getBlockletInfo(org.carbondata.format.BlockletInfo blockletInfoThrift) {
- BlockletInfo blockletInfo = new BlockletInfo();
- List<DataChunk> dimensionColumnChunk = new ArrayList<DataChunk>();
- List<DataChunk> measureChunk = new ArrayList<DataChunk>();
- Iterator<org.carbondata.format.DataChunk> column_data_chunksIterator =
- blockletInfoThrift.getColumn_data_chunksIterator();
- while (column_data_chunksIterator.hasNext()) {
- org.carbondata.format.DataChunk next = column_data_chunksIterator.next();
- if (next.isRowMajor()) {
- dimensionColumnChunk.add(getDataChunk(next, false));
- } else if (next.getEncoders().contains(org.carbondata.format.Encoding.DELTA)) {
- measureChunk.add(getDataChunk(next, true));
- } else {
-
- dimensionColumnChunk.add(getDataChunk(next, false));
- }
- }
- blockletInfo.setDimensionColumnChunk(dimensionColumnChunk);
- blockletInfo.setMeasureColumnChunk(measureChunk);
- blockletInfo.setNumberOfRows(blockletInfoThrift.getNum_rows());
- return blockletInfo;
- }
-
- /**
- * Below method is convert the thrift encoding to wrapper encoding
- *
- * @param encoderThrift thrift encoding
- * @return wrapper encoding
- */
- private Encoding fromExternalToWrapperEncoding(org.carbondata.format.Encoding encoderThrift) {
- switch (encoderThrift) {
- case DICTIONARY:
- return Encoding.DICTIONARY;
- case DELTA:
- return Encoding.DELTA;
- case RLE:
- return Encoding.RLE;
- case INVERTED_INDEX:
- return Encoding.INVERTED_INDEX;
- case BIT_PACKED:
- return Encoding.BIT_PACKED;
- case DIRECT_DICTIONARY:
- return Encoding.DIRECT_DICTIONARY;
- default:
- return Encoding.DICTIONARY;
- }
- }
-
- /**
- * Below method will be used to convert the thrift compression to wrapper
- * compression codec
- *
- * @param compressionCodecThrift
- * @return wrapper compression codec
- */
- private CompressionCodec getCompressionCodec(
- org.carbondata.format.CompressionCodec compressionCodecThrift) {
- switch (compressionCodecThrift) {
- case SNAPPY:
- return CompressionCodec.SNAPPY;
- default:
- return CompressionCodec.SNAPPY;
- }
- }
-
- /**
- * Below method will be used to convert thrift segment object to wrapper
- * segment object
- *
- * @param segmentInfo thrift segment info object
- * @return wrapper segment info object
- */
- private SegmentInfo getSegmentInfo(org.carbondata.format.SegmentInfo segmentInfo) {
- SegmentInfo info = new SegmentInfo();
- int[] cardinality = new int[segmentInfo.getColumn_cardinalities().size()];
- for (int i = 0; i < cardinality.length; i++) {
- cardinality[i] = segmentInfo.getColumn_cardinalities().get(i);
- }
- info.setColumnCardinality(cardinality);
- info.setNumberOfColumns(segmentInfo.getNum_cols());
- return info;
- }
-
- /**
- * Below method will be used to convert the blocklet index of thrift to
- * wrapper
- *
- * @param blockletIndexThrift
- * @return blocklet index wrapper
- */
- private BlockletIndex getBlockletIndex(org.carbondata.format.BlockletIndex blockletIndexThrift) {
- org.carbondata.format.BlockletBTreeIndex btreeIndex = blockletIndexThrift.getB_tree_index();
- org.carbondata.format.BlockletMinMaxIndex minMaxIndex = blockletIndexThrift.getMin_max_index();
- return new BlockletIndex(
- new BlockletBTreeIndex(btreeIndex.getStart_key(), btreeIndex.getEnd_key()),
- new BlockletMinMaxIndex(minMaxIndex.getMin_values(), minMaxIndex.getMax_values()));
- }
-
- /**
- * Below method will be used to convert the thrift compression meta to
- * wrapper chunk compression meta
- *
- * @param chunkCompressionMetaThrift
- * @return chunkCompressionMetaWrapper
- */
- private ChunkCompressorMeta getChunkCompressionMeta(
- org.carbondata.format.ChunkCompressionMeta chunkCompressionMetaThrift) {
- ChunkCompressorMeta compressorMeta = new ChunkCompressorMeta();
- compressorMeta
- .setCompressor(getCompressionCodec(chunkCompressionMetaThrift.getCompression_codec()));
- compressorMeta.setCompressedSize(chunkCompressionMetaThrift.getTotal_compressed_size());
- compressorMeta.setUncompressedSize(chunkCompressionMetaThrift.getTotal_uncompressed_size());
- return compressorMeta;
- }
-
- /**
- * Below method will be used to convert the thrift data type to wrapper data
- * type
- *
- * @param dataTypeThrift
- * @return dataType wrapper
- */
- private DataType thriftDataTyopeToWrapperDataType(org.carbondata.format.DataType dataTypeThrift) {
- switch (dataTypeThrift) {
- case STRING:
- return DataType.STRING;
- case INT:
- return DataType.INT;
- case LONG:
- return DataType.LONG;
- case DOUBLE:
- return DataType.DOUBLE;
- case DECIMAL:
- return DataType.DECIMAL;
- case TIMESTAMP:
- return DataType.TIMESTAMP;
- case ARRAY:
- return DataType.ARRAY;
- case STRUCT:
- return DataType.STRUCT;
- default:
- return DataType.STRING;
- }
- }
-
- /**
- * Below method will be used to convert the thrift presence meta to wrapper
- * presence meta
- *
- * @param presentMetadataThrift
- * @return wrapper presence meta
- */
- private PresenceMeta getPresenceMeta(org.carbondata.format.PresenceMeta presentMetadataThrift) {
- PresenceMeta presenceMeta = new PresenceMeta();
- presenceMeta.setRepresentNullValues(presentMetadataThrift.isRepresents_presence());
- presenceMeta.setBitSet(BitSet.valueOf(presentMetadataThrift.getPresent_bit_stream()));
- return presenceMeta;
- }
-
- /**
- * Below method will be used to convert the thrift object to wrapper object
- *
- * @param sortStateThrift
- * @return wrapper sort state object
- */
- private SortState getSortState(org.carbondata.format.SortState sortStateThrift) {
- if (sortStateThrift == org.carbondata.format.SortState.SORT_EXPLICIT) {
- return SortState.SORT_EXPLICT;
- } else if (sortStateThrift == org.carbondata.format.SortState.SORT_NATIVE) {
- return SortState.SORT_NATIVE;
- } else {
- return SortState.SORT_NONE;
- }
- }
-
- /**
- * Below method will be used to convert the thrift data chunk to wrapper
- * data chunk
- *
- * @param datachunkThrift
- * @return wrapper data chunk
- */
- private DataChunk getDataChunk(org.carbondata.format.DataChunk datachunkThrift,
- boolean isPresenceMetaPresent) {
- DataChunk dataChunk = new DataChunk();
- dataChunk.setColumnUniqueIdList(datachunkThrift.getColumn_ids());
- dataChunk.setDataPageLength(datachunkThrift.getData_page_length());
- dataChunk.setDataPageOffset(datachunkThrift.getData_page_offset());
- if (isPresenceMetaPresent) {
- dataChunk.setNullValueIndexForColumn(getPresenceMeta(datachunkThrift.getPresence()));
- }
- dataChunk.setRlePageLength(datachunkThrift.getRle_page_length());
- dataChunk.setRlePageOffset(datachunkThrift.getRle_page_offset());
- dataChunk.setRowMajor(datachunkThrift.isRowMajor());
- dataChunk.setRowIdPageLength(datachunkThrift.getRowid_page_length());
- dataChunk.setRowIdPageOffset(datachunkThrift.getRowid_page_offset());
- dataChunk.setSortState(getSortState(datachunkThrift.getSort_state()));
- dataChunk.setChunkCompressionMeta(getChunkCompressionMeta(datachunkThrift.getChunk_meta()));
- List<Encoding> encodingList = new ArrayList<Encoding>(datachunkThrift.getEncoders().size());
- for (int i = 0; i < datachunkThrift.getEncoders().size(); i++) {
- encodingList.add(fromExternalToWrapperEncoding(datachunkThrift.getEncoders().get(i)));
- }
- dataChunk.setEncoderList(encodingList);
- if (encodingList.contains(Encoding.DELTA)) {
- List<ByteBuffer> thriftEncoderMeta = datachunkThrift.getEncoder_meta();
- List<ValueEncoderMeta> encodeMetaList =
- new ArrayList<ValueEncoderMeta>(thriftEncoderMeta.size());
- for (int i = 0; i < thriftEncoderMeta.size(); i++) {
- encodeMetaList.add(deserializeEncoderMeta(thriftEncoderMeta.get(i).array()));
- }
- dataChunk.setValueEncoderMeta(encodeMetaList);
- }
- return dataChunk;
- }
-
- /**
- * Below method will be used to convert the encode metadata to
- * ValueEncoderMeta object
- *
- * @param encoderMeta
- * @return ValueEncoderMeta object
- */
- private ValueEncoderMeta deserializeEncoderMeta(byte[] encoderMeta) {
- // TODO : should remove the unnecessary fields.
- ByteArrayInputStream aos = null;
- ObjectInputStream objStream = null;
- ValueEncoderMeta meta = null;
- try {
- aos = new ByteArrayInputStream(encoderMeta);
- objStream = new ObjectInputStream(aos);
- meta = (ValueEncoderMeta) objStream.readObject();
- } catch (ClassNotFoundException e) {
- LOGGER.error(e);
- } catch (IOException e) {
- CarbonUtil.closeStreams(objStream);
- }
- return meta;
- }
-}
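
For readers following the refactor: the interesting step in the file above is getBlockletIndexForDataFileFooter, which folds the per-blocklet min/max arrays into one block-level index by comparing each byte[] value as unsigned data. A minimal standalone sketch of that merge is below; the class and method names are illustrative only, and a hand-written unsigned comparison stands in for ByteUtil.UnsafeComparer.

// Standalone sketch (illustrative names, not part of the commit): merge the
// per-blocklet min/max byte[][] arrays into block-level min/max arrays.
public final class MinMaxMergeSketch {

  // input: one byte[][] of mins and one of maxs per blocklet
  // output: {mergedMins, mergedMaxs}
  public static byte[][][] merge(byte[][][] minsPerBlocklet, byte[][][] maxsPerBlocklet) {
    byte[][] currentMin = minsPerBlocklet[0].clone();
    byte[][] currentMax = maxsPerBlocklet[0].clone();
    for (int i = 1; i < minsPerBlocklet.length; i++) {
      for (int j = 0; j < currentMin.length; j++) {
        // keep the smaller min and the larger max, per column
        if (compareUnsigned(currentMin[j], minsPerBlocklet[i][j]) > 0) {
          currentMin[j] = minsPerBlocklet[i][j].clone();
        }
        if (compareUnsigned(currentMax[j], maxsPerBlocklet[i][j]) < 0) {
          currentMax[j] = maxsPerBlocklet[i][j].clone();
        }
      }
    }
    return new byte[][][] { currentMin, currentMax };
  }

  // lexicographic comparison of two byte arrays, treating bytes as unsigned
  private static int compareUnsigned(byte[] a, byte[] b) {
    int len = Math.min(a.length, b.length);
    for (int k = 0; k < len; k++) {
      int x = a[k] & 0xFF;
      int y = b[k] & 0xFF;
      if (x != y) {
        return x - y;
      }
    }
    return a.length - b.length;
  }
}
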
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/1c725f5b/core/src/main/java/org/carbondata/scan/collector/ScannedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/collector/ScannedResultCollector.java b/core/src/main/java/org/carbondata/scan/collector/ScannedResultCollector.java
new file mode 100644
index 0000000..f214a9d
--- /dev/null
+++ b/core/src/main/java/org/carbondata/scan/collector/ScannedResultCollector.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.carbondata.scan.collector;
+
+import org.carbondata.scan.result.AbstractScannedResult;
+import org.carbondata.scan.result.Result;
+
+/**
+ * Interface which will be used to aggregate the scan result
+ */
+public interface ScannedResultCollector {
+
+ /**
+ * Below method will be used to aggregate the scanned result
+ *
+ * @param scannedResult scanned result
+ * @return how many records were aggregated
+ */
+ int collectData(AbstractScannedResult scannedResult, int batchSize);
+
+ /**
+ * Below method will be used to get the aggregated result
+ *
+ * @return the collected result
+ */
+ Result getCollectedResult();
+}
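
As a hedged usage sketch of this interface (the real driver loop lives in the result iterator, which is part of this refactor but not shown in this mail): the caller keeps collecting batches while the scanned result still has rows, and fetches the collected Result after each batch. The helper class below is hypothetical and assumes the collector and scanned result are supplied by the executor layer.

import org.carbondata.scan.collector.ScannedResultCollector;
import org.carbondata.scan.result.AbstractScannedResult;
import org.carbondata.scan.result.Result;

// Hypothetical helper, not part of this commit: drain a scanned result
// through a collector one batch at a time.
public final class CollectorDrainSketch {
  public static void drain(ScannedResultCollector collector,
      AbstractScannedResult scannedResult, int batchSize) {
    while (scannedResult.hasNext()) {
      // collect up to batchSize rows from the scanned result
      int rows = collector.collectData(scannedResult, batchSize);
      // fetch the rows collected for this batch
      Result batch = collector.getCollectedResult();
      // hand 'batch' (holding 'rows' entries) to the result preparator/caller
    }
  }
}
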
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/1c725f5b/core/src/main/java/org/carbondata/scan/collector/impl/ListBasedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/collector/impl/ListBasedResultCollector.java b/core/src/main/java/org/carbondata/scan/collector/impl/ListBasedResultCollector.java
new file mode 100644
index 0000000..92507ea
--- /dev/null
+++ b/core/src/main/java/org/carbondata/scan/collector/impl/ListBasedResultCollector.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.carbondata.scan.collector.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.carbondata.common.logging.LogService;
+import org.carbondata.common.logging.LogServiceFactory;
+import org.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
+import org.carbondata.core.carbon.metadata.datatype.DataType;
+import org.carbondata.core.keygenerator.KeyGenException;
+import org.carbondata.scan.collector.ScannedResultCollector;
+import org.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.carbondata.scan.executor.infos.KeyStructureInfo;
+import org.carbondata.scan.executor.util.QueryUtil;
+import org.carbondata.scan.result.AbstractScannedResult;
+import org.carbondata.scan.result.ListBasedResultWrapper;
+import org.carbondata.scan.result.Result;
+import org.carbondata.scan.result.impl.ListBasedResult;
+import org.carbondata.scan.util.DataTypeUtil;
+import org.carbondata.scan.wrappers.ByteArrayWrapper;
+
+/**
+ * It is not really a collector; it is just a holder for the scanned result.
+ *
+ */
+public class ListBasedResultCollector implements ScannedResultCollector {
+
+ private static final LogService LOGGER =
+ LogServiceFactory.getLogService(ListBasedResultCollector.class.getName());
+
+ /**
+ * keeps track of the number of rows processed, to handle limit push-down
+ * in the detail query scenario
+ */
+ private int rowCounter;
+
+ /**
+ * dimension values list
+ */
+ private List<ListBasedResultWrapper> listBasedResult;
+
+ /**
+ * restructuring info
+ */
+ private KeyStructureInfo restructureInfos;
+
+ /**
+ * table block execution infos
+ */
+ private BlockExecutionInfo tableBlockExecutionInfos;
+
+ private int[] measuresOrdinal;
+
+ /**
+ * whether each measure exists in the current table block or not; this is
+ * to handle the restructuring scenario
+ */
+ private boolean[] isMeasureExistsInCurrentBlock;
+
+ /**
+ * default values of the measures; in case of restructuring some measures
+ * will not be present in the table block, so the default value will be
+ * used to aggregate the data for those measure columns
+ */
+ private Object[] measureDefaultValue;
+
+ /**
+ * measure datatypes.
+ */
+ private DataType[] measureDatatypes;
+
+ public ListBasedResultCollector(BlockExecutionInfo blockExecutionInfos) {
+ this.tableBlockExecutionInfos = blockExecutionInfos;
+ restructureInfos = blockExecutionInfos.getKeyStructureInfo();
+ measuresOrdinal = tableBlockExecutionInfos.getAggregatorInfo().getMeasureOrdinals();
+ isMeasureExistsInCurrentBlock = tableBlockExecutionInfos.getAggregatorInfo().getMeasureExists();
+ measureDefaultValue = tableBlockExecutionInfos.getAggregatorInfo().getDefaultValues();
+ this.measureDatatypes = tableBlockExecutionInfos.getAggregatorInfo().getMeasureDataTypes();
+ }
+
+ /**
+ * This method will add a record (both key and value) to the list object.
+ * It keeps track of how many records have been processed, to handle the
+ * limit scenario.
+ *
+ * @param scannedResult scanned result
+ * @param batchSize maximum number of records to collect in this call
+ */
+ @Override
+ public int collectData(AbstractScannedResult scannedResult, int batchSize) {
+ this.listBasedResult =
+ new ArrayList<>(batchSize);
+ boolean isMsrsPresent = measureDatatypes.length > 0;
+ ByteArrayWrapper wrapper = null;
+ // scan the record and add to list
+ ListBasedResultWrapper resultWrapper;
+ int rowCounter = 0;
+ while (scannedResult.hasNext() && rowCounter < batchSize) {
+ resultWrapper = new ListBasedResultWrapper();
+ if(tableBlockExecutionInfos.isDimensionsExistInQuery()) {
+ wrapper = new ByteArrayWrapper();
+ wrapper.setDictionaryKey(scannedResult.getDictionaryKeyArray());
+ wrapper.setNoDictionaryKeys(scannedResult.getNoDictionaryKeyArray());
+ wrapper.setComplexTypesKeys(scannedResult.getComplexTypeKeyArray());
+ resultWrapper.setKey(wrapper);
+ } else {
+ scannedResult.incrementCounter();
+ }
+ if(isMsrsPresent) {
+ Object[] msrValues = new Object[measureDatatypes.length];
+ fillMeasureData(msrValues, scannedResult);
+ resultWrapper.setValue(msrValues);
+ }
+ listBasedResult.add(resultWrapper);
+ rowCounter++;
+ }
+ return rowCounter;
+ }
+
+ private void fillMeasureData(Object[] msrValues, AbstractScannedResult scannedResult) {
+ for (short i = 0; i < measuresOrdinal.length; i++) {
+ // if the measure exists in this block then pass the measure column
+ // data chunk to the collector
+ if (isMeasureExistsInCurrentBlock[i]) {
+ msrValues[i] =
+ getMeasureData(scannedResult.getMeasureChunk(measuresOrdinal[i]),
+ scannedResult.getCurrenrRowId(),measureDatatypes[i]);
+ } else {
+ // if not then get the default value and use that value in aggregation
+ msrValues[i] = measureDefaultValue[i];
+ }
+ }
+ }
+
+ private Object getMeasureData(MeasureColumnDataChunk dataChunk, int index, DataType dataType) {
+ if (!dataChunk.getNullValueIndexHolder().getBitSet().get(index)) {
+ Object msrVal;
+ switch (dataType) {
+ case LONG:
+ msrVal = dataChunk.getMeasureDataHolder().getReadableLongValueByIndex(index);
+ break;
+ case DECIMAL:
+ msrVal = dataChunk.getMeasureDataHolder().getReadableBigDecimalValueByIndex(index);
+ break;
+ default:
+ msrVal = dataChunk.getMeasureDataHolder().getReadableDoubleValueByIndex(index);
+ }
+ return DataTypeUtil.getMeasureDataBasedOnDataType(msrVal, dataType);
+ }
+ return null;
+ }
+
+ /**
+ * Below method will be used to get the result
+ */
+ @Override public Result getCollectedResult() {
+ Result<List<ListBasedResultWrapper>, Object> result = new ListBasedResult();
+ if (!tableBlockExecutionInfos.isFixedKeyUpdateRequired()) {
+ updateKeyWithLatestBlockKeyGenerator();
+ }
+ result.addScannedResult(listBasedResult);
+ return result;
+ }
+
+
+
+ /**
+ * Below method will be used to update the fixed length key of each result
+ * row with the latest block key generator
+ */
+ private void updateKeyWithLatestBlockKeyGenerator() {
+ try {
+ long[] data = null;
+ ByteArrayWrapper key = null;
+ for (int i = 0; i < listBasedResult.size(); i++) {
+ // get the key
+ key = listBasedResult.get(i).getKey();
+ // unpack the key with table block key generator
+ data = tableBlockExecutionInfos.getBlockKeyGenerator()
+ .getKeyArray(key.getDictionaryKey(), tableBlockExecutionInfos.getMaskedByteForBlock());
+ // pack the key with the latest block key generator
+ // and generate the masked key for that key
+ key.setDictionaryKey(QueryUtil
+ .getMaskedKey(restructureInfos.getKeyGenerator().generateKey(data),
+ restructureInfos.getMaxKey(), restructureInfos.getMaskByteRanges(),
+ restructureInfos.getMaskByteRanges().length));
+ listBasedResult.get(i).setKey(key);
+ }
+ } catch (KeyGenException e) {
+ LOGGER.error(e);
+ }
+ }
+
+}
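
The getMeasureData method above follows a recurring pattern: a null-value BitSet guards reads from a typed measure page, and only non-null rows are materialized. A small standalone sketch of that pattern, using plain arrays instead of MeasureColumnDataChunk (all names here are illustrative):

import java.util.BitSet;

// Standalone sketch: read a measure column page where a BitSet marks the
// rows whose stored value represents null.
public final class NullBitSetMeasureReadSketch {

  public static Object[] readColumn(double[] page, BitSet nullRows) {
    Object[] values = new Object[page.length];
    for (int row = 0; row < page.length; row++) {
      // a set bit means the stored value is only a placeholder for null
      values[row] = nullRows.get(row) ? null : page[row];
    }
    return values;
  }

  public static void main(String[] args) {
    double[] page = { 1.5, 0.0, 3.25 };
    BitSet nullRows = new BitSet();
    nullRows.set(1); // row 1 is null
    for (Object v : readColumn(page, nullRows)) {
      System.out.println(v); // prints 1.5, null, 3.25
    }
  }
}
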
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/1c725f5b/core/src/main/java/org/carbondata/scan/executor/QueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/QueryExecutor.java b/core/src/main/java/org/carbondata/scan/executor/QueryExecutor.java
new file mode 100644
index 0000000..6e04714
--- /dev/null
+++ b/core/src/main/java/org/carbondata/scan/executor/QueryExecutor.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.carbondata.scan.executor;
+
+import org.carbondata.core.iterator.CarbonIterator;
+import org.carbondata.scan.executor.exception.QueryExecutionException;
+import org.carbondata.scan.model.QueryModel;
+
+/**
+ * Interface for carbon query executor.
+ * Will be used to execute the query based on the query model
+ * and will return the iterator over query result
+ */
+public interface QueryExecutor<E> {
+
+ /**
+ * Below method will be used to execute the query based on query model passed from driver
+ *
+ * @param queryModel query details
+ * @return query result iterator
+ * @throws QueryExecutionException if any failure while executing the query
+ */
+ CarbonIterator<E> execute(QueryModel queryModel) throws QueryExecutionException;
+}
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/1c725f5b/core/src/main/java/org/carbondata/scan/executor/QueryExecutorFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/QueryExecutorFactory.java b/core/src/main/java/org/carbondata/scan/executor/QueryExecutorFactory.java
new file mode 100644
index 0000000..724b8b6
--- /dev/null
+++ b/core/src/main/java/org/carbondata/scan/executor/QueryExecutorFactory.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.carbondata.scan.executor;
+
+import org.carbondata.scan.executor.impl.DetailQueryExecutor;
+import org.carbondata.scan.executor.impl.DetailRawRecordQueryExecutor;
+import org.carbondata.scan.model.QueryModel;
+
+/**
+ * Factory class used from the RDD layer to get the query executor.
+ * It returns the executor based on the query type.
+ */
+public class QueryExecutorFactory {
+
+ public static QueryExecutor getQueryExecutor(QueryModel queryModel) {
+ if (queryModel.isForcedDetailRawQuery()) {
+ return new DetailRawRecordQueryExecutor();
+ } else {
+ return new DetailQueryExecutor();
+ }
+ }
+}
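
Taken together with the QueryExecutor interface above, the intended call pattern from the integration layer looks roughly like the sketch below. How the QueryModel gets built is outside this mail, so the model is assumed to be already prepared, and CarbonIterator is assumed to expose the usual hasNext()/next() pair; the sketch itself is not part of the commit.

import org.carbondata.core.iterator.CarbonIterator;
import org.carbondata.scan.executor.QueryExecutor;
import org.carbondata.scan.executor.QueryExecutorFactory;
import org.carbondata.scan.executor.exception.QueryExecutionException;
import org.carbondata.scan.model.QueryModel;

// Hedged sketch: run a query through the factory and iterate the results.
public final class QueryExecutionSketch {
  public static void run(QueryModel queryModel) throws QueryExecutionException {
    // the factory picks a detail or raw-detail executor from the query model
    QueryExecutor executor = QueryExecutorFactory.getQueryExecutor(queryModel);
    CarbonIterator<?> resultIterator = executor.execute(queryModel);
    while (resultIterator.hasNext()) {
      Object row = resultIterator.next();
      // consume 'row' (Object[] for detail queries, BatchResult for raw queries)
    }
  }
}
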
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/1c725f5b/core/src/main/java/org/carbondata/scan/executor/exception/QueryExecutionException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/exception/QueryExecutionException.java b/core/src/main/java/org/carbondata/scan/executor/exception/QueryExecutionException.java
new file mode 100644
index 0000000..f5d0e81
--- /dev/null
+++ b/core/src/main/java/org/carbondata/scan/executor/exception/QueryExecutionException.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.carbondata.scan.executor.exception;
+
+import java.util.Locale;
+
+/**
+ * Exception class for query execution
+ */
+public class QueryExecutionException extends Exception {
+
+ /**
+ * default serial version ID.
+ */
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * The Error message.
+ */
+ private String msg = "";
+
+ /**
+ * Constructor
+ *
+ * @param msg The error message for this exception.
+ */
+ public QueryExecutionException(String msg) {
+ super(msg);
+ this.msg = msg;
+ }
+
+ /**
+ * Constructor
+ *
+ * @param msg The error message for this exception.
+ * @param t The cause of this exception.
+ */
+ public QueryExecutionException(String msg, Throwable t) {
+ super(msg, t);
+ this.msg = msg;
+ }
+
+ /**
+ * Constructor
+ *
+ * @param t The cause of this exception.
+ */
+ public QueryExecutionException(Throwable t) {
+ super(t);
+ }
+
+ /**
+ * This method is used to get the localized message.
+ *
+ * @param locale - A Locale object representing a specific geographical,
+ * political, or cultural region.
+ * @return - Localized error message.
+ */
+ public String getLocalizedMessage(Locale locale) {
+ return "";
+ }
+
+ /**
+ * getLocalizedMessage
+ */
+ @Override public String getLocalizedMessage() {
+ return super.getLocalizedMessage();
+ }
+
+ /**
+ * getMessage
+ */
+ public String getMessage() {
+ return this.msg;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/1c725f5b/core/src/main/java/org/carbondata/scan/executor/impl/AbstractQueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/impl/AbstractQueryExecutor.java b/core/src/main/java/org/carbondata/scan/executor/impl/AbstractQueryExecutor.java
new file mode 100644
index 0000000..eb2261d
--- /dev/null
+++ b/core/src/main/java/org/carbondata/scan/executor/impl/AbstractQueryExecutor.java
@@ -0,0 +1,403 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.carbondata.scan.executor.impl;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.carbondata.common.logging.LogService;
+import org.carbondata.common.logging.LogServiceFactory;
+import org.carbondata.common.logging.impl.StandardLogService;
+import org.carbondata.core.carbon.datastore.BlockIndexStore;
+import org.carbondata.core.carbon.datastore.IndexKey;
+import org.carbondata.core.carbon.datastore.block.AbstractIndex;
+import org.carbondata.core.carbon.datastore.block.SegmentProperties;
+import org.carbondata.core.carbon.datastore.exception.IndexBuilderException;
+import org.carbondata.core.carbon.metadata.datatype.DataType;
+import org.carbondata.core.carbon.metadata.encoder.Encoding;
+import org.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension;
+import org.carbondata.core.carbon.metadata.schema.table.column.CarbonMeasure;
+import org.carbondata.core.constants.CarbonCommonConstants;
+import org.carbondata.core.datastorage.store.impl.FileFactory;
+import org.carbondata.core.keygenerator.KeyGenException;
+import org.carbondata.core.keygenerator.KeyGenerator;
+import org.carbondata.core.util.CarbonUtil;
+import org.carbondata.scan.executor.QueryExecutor;
+import org.carbondata.scan.executor.exception.QueryExecutionException;
+import org.carbondata.scan.executor.infos.AggregatorInfo;
+import org.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.carbondata.scan.executor.infos.KeyStructureInfo;
+import org.carbondata.scan.executor.infos.SortInfo;
+import org.carbondata.scan.executor.util.QueryUtil;
+import org.carbondata.scan.executor.util.RestructureUtil;
+import org.carbondata.scan.filter.FilterUtil;
+import org.carbondata.scan.model.QueryDimension;
+import org.carbondata.scan.model.QueryMeasure;
+import org.carbondata.scan.model.QueryModel;
+
+import org.apache.commons.lang3.ArrayUtils;
+
+/**
+ * This class provides a skeletal implementation of the {@link QueryExecutor}
+ * interface to minimize the effort required to implement this interface. This
+ * will be used to prepare all the properties required for query execution
+ */
+public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
+
+ private static final LogService LOGGER =
+ LogServiceFactory.getLogService(AbstractQueryExecutor.class.getName());
+ /**
+ * holder for query properties which will be used to execute the query
+ */
+ protected QueryExecutorProperties queryProperties;
+
+ public AbstractQueryExecutor() {
+ queryProperties = new QueryExecutorProperties();
+ }
+
+ /**
+ * Below method will be used to fill the executor properties based on the
+ * query model: it parses the query model, gets the details and fills them
+ * into the query properties
+ *
+ * @param queryModel
+ */
+ protected void initQuery(QueryModel queryModel) throws QueryExecutionException {
+ StandardLogService.setThreadName(StandardLogService.getPartitionID(
+ queryModel.getAbsoluteTableIdentifier().getCarbonTableIdentifier().getTableName()),
+ queryModel.getQueryId());
+ LOGGER.info("Query will be executed on table: " + queryModel.getAbsoluteTableIdentifier()
+ .getCarbonTableIdentifier().getTableName());
+
+ QueryUtil.resolveQueryModel(queryModel);
+
+ // get the table blocks
+ try {
+ queryProperties.dataBlocks = BlockIndexStore.getInstance()
+ .loadAndGetBlocks(queryModel.getTableBlockInfos(),
+ queryModel.getAbsoluteTableIdentifier());
+ } catch (IndexBuilderException e) {
+ throw new QueryExecutionException(e);
+ }
+ // updating the restructuring infos for the query
+ queryProperties.keyStructureInfo = getKeyStructureInfo(queryModel,
+ queryProperties.dataBlocks.get(queryProperties.dataBlocks.size() - 1).getSegmentProperties()
+ .getDimensionKeyGenerator());
+
+ // calculating the total number of aggregated columns
+ int aggTypeCount = queryModel.getQueryMeasures().size();
+
+ int currentIndex = 0;
+ String[] aggTypes = new String[aggTypeCount];
+ DataType[] dataTypes = new DataType[aggTypeCount];
+
+ for (QueryMeasure carbonMeasure : queryModel.getQueryMeasures()) {
+ // adding the data type and aggregation type of every measure; this
+ // can be used to select the aggregator
+ aggTypes[currentIndex] = carbonMeasure.getAggregateFunction();
+ dataTypes[currentIndex] = carbonMeasure.getMeasure().getDataType();
+ currentIndex++;
+ }
+ queryProperties.measureDataTypes = dataTypes;
+ // as aggregation will be executed in following order
+ // 1.aggregate dimension expression
+ // 2. expression
+ // 3. query measure
+ // so calculating the index of the expression start index
+ // and measure column start index
+ queryProperties.aggExpressionStartIndex = queryModel.getQueryMeasures().size();
+ queryProperties.measureStartIndex = aggTypes.length - queryModel.getQueryMeasures().size();
+
+ // dictionary column unique column id to dictionary mapping
+ // which will be used to get column actual data
+ queryProperties.columnToDictionayMapping = QueryUtil
+ .getDimensionDictionaryDetail(queryModel.getQueryDimension(),
+ queryModel.getAbsoluteTableIdentifier());
+ queryModel.setColumnToDictionaryMapping(queryProperties.columnToDictionayMapping);
+ // setting the sort dimension indexes; they will be updated while getting the sort info,
+ // so currently the default of 0 means sort is not present on any dimension
+ queryProperties.sortDimIndexes = new byte[queryModel.getQueryDimension().size()];
+ }
+
+ /**
+ * Below method will be used to get the key structure info for the query
+ *
+ * @param queryModel query model
+ * @param keyGenerator key generator of the table block
+ * @return key structure info
+ */
+ private KeyStructureInfo getKeyStructureInfo(QueryModel queryModel, KeyGenerator keyGenerator) {
+ // getting the masked byte range for dictionary column
+ int[] maskByteRanges =
+ QueryUtil.getMaskedByteRange(queryModel.getQueryDimension(), keyGenerator);
+
+ // getting the masked bytes for query dimension dictionary column
+ int[] maskedBytes = QueryUtil.getMaskedByte(keyGenerator.getKeySizeInBytes(), maskByteRanges);
+
+ // max key for the dictionary dimension present in the query
+ byte[] maxKey = null;
+ try {
+ // getting the max key, which will be used to mask and get the
+ // masked key
+ maxKey = QueryUtil.getMaxKeyBasedOnDimensions(queryModel.getQueryDimension(), keyGenerator);
+ } catch (KeyGenException e) {
+ LOGGER.error(e, "problem while getting the max key");
+ }
+
+ KeyStructureInfo restructureInfos = new KeyStructureInfo();
+ restructureInfos.setKeyGenerator(keyGenerator);
+ restructureInfos.setMaskByteRanges(maskByteRanges);
+ restructureInfos.setMaskedBytes(maskedBytes);
+ restructureInfos.setMaxKey(maxKey);
+ return restructureInfos;
+ }
+
+ protected List<BlockExecutionInfo> getBlockExecutionInfos(QueryModel queryModel)
+ throws QueryExecutionException {
+ initQuery(queryModel);
+ List<BlockExecutionInfo> blockExecutionInfoList = new ArrayList<BlockExecutionInfo>();
+ // fill the block execution info for all the blocks selected in the
+ // query; the query will be executed based on those infos
+ for (int i = 0; i < queryProperties.dataBlocks.size(); i++) {
+ blockExecutionInfoList
+ .add(getBlockExecutionInfoForBlock(queryModel, queryProperties.dataBlocks.get(i)));
+ }
+ return blockExecutionInfoList;
+ }
+
+ /**
+ * Below method will be used to get the block execution info which is
+ * required to execute any block based on query model
+ *
+ * @param queryModel query model from user query
+ * @param blockIndex block index
+ * @return block execution info
+ * @throws QueryExecutionException any failure during block info creation
+ */
+ protected BlockExecutionInfo getBlockExecutionInfoForBlock(QueryModel queryModel,
+ AbstractIndex blockIndex) throws QueryExecutionException {
+ BlockExecutionInfo blockExecutionInfo = new BlockExecutionInfo();
+ SegmentProperties segmentProperties = blockIndex.getSegmentProperties();
+ List<CarbonDimension> tableBlockDimensions = segmentProperties.getDimensions();
+ KeyGenerator blockKeyGenerator = segmentProperties.getDimensionKeyGenerator();
+
+ // below is to get only those query dimensions which are present in the
+ // table block
+ List<QueryDimension> updatedQueryDimension = RestructureUtil
+ .getUpdatedQueryDimension(queryModel.getQueryDimension(), tableBlockDimensions);
+ // TODO add complex dimension children
+ int[] maskByteRangesForBlock =
+ QueryUtil.getMaskedByteRange(updatedQueryDimension, blockKeyGenerator);
+ int[] maksedByte =
+ QueryUtil.getMaskedByte(blockKeyGenerator.getKeySizeInBytes(), maskByteRangesForBlock);
+ blockExecutionInfo.setDimensionsExistInQuery(updatedQueryDimension.size() > 0);
+ blockExecutionInfo.setDataBlock(blockIndex);
+ blockExecutionInfo.setBlockKeyGenerator(blockKeyGenerator);
+ // adding aggregation info for query
+ blockExecutionInfo.setAggregatorInfo(getAggregatorInfoForBlock(queryModel, blockIndex));
+
+ // setting the limit
+ blockExecutionInfo.setLimit(queryModel.getLimit());
+ // setting whether detail query or not
+ blockExecutionInfo.setDetailQuery(queryModel.isDetailQuery());
+ // setting whether raw record query or not
+ blockExecutionInfo.setRawRecordDetailQuery(queryModel.isForcedDetailRawQuery());
+ // setting the masked bytes of the block which will be
+ // used to unpack the older block keys
+ blockExecutionInfo.setMaskedByteForBlock(maksedByte);
+ // total number of dimension blocks
+ blockExecutionInfo
+ .setTotalNumberDimensionBlock(segmentProperties.getDimensionOrdinalToBlockMapping().size());
+ blockExecutionInfo
+ .setTotalNumberOfMeasureBlock(segmentProperties.getMeasuresOrdinalToBlockMapping().size());
+ // to check whether older block key update is required or not
+ blockExecutionInfo.setFixedKeyUpdateRequired(
+ blockKeyGenerator.equals(queryProperties.keyStructureInfo.getKeyGenerator()));
+ IndexKey startIndexKey = null;
+ IndexKey endIndexKey = null;
+ if (null != queryModel.getFilterExpressionResolverTree()) {
+ // loading the filter executer tree for filter evaluation
+ blockExecutionInfo.setFilterExecuterTree(FilterUtil
+ .getFilterExecuterTree(queryModel.getFilterExpressionResolverTree(), segmentProperties));
+ List<IndexKey> listOfStartEndKeys = new ArrayList<IndexKey>(2);
+ FilterUtil.traverseResolverTreeAndGetStartAndEndKey(segmentProperties,
+ queryModel.getAbsoluteTableIdentifier(), queryModel.getFilterExpressionResolverTree(),
+ listOfStartEndKeys);
+ startIndexKey = listOfStartEndKeys.get(0);
+ endIndexKey = listOfStartEndKeys.get(1);
+ } else {
+ try {
+ startIndexKey = FilterUtil.prepareDefaultStartIndexKey(segmentProperties);
+ endIndexKey = FilterUtil.prepareDefaultEndIndexKey(segmentProperties);
+ } catch (KeyGenException e) {
+ throw new QueryExecutionException(e);
+ }
+ }
+ blockExecutionInfo.setFileType(
+ FileFactory.getFileType(queryModel.getAbsoluteTableIdentifier().getStorePath()));
+ //setting the start index key of the block node
+ blockExecutionInfo.setStartKey(startIndexKey);
+ //setting the end index key of the block node
+ blockExecutionInfo.setEndKey(endIndexKey);
+ // expression dimensions
+ List<CarbonDimension> expressionDimensions =
+ new ArrayList<CarbonDimension>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+ // expression measure
+ List<CarbonMeasure> expressionMeasures =
+ new ArrayList<CarbonMeasure>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+ // setting all the dimension chunk indexes to be read from file
+ blockExecutionInfo.setAllSelectedDimensionBlocksIndexes(QueryUtil
+ .getDimensionsBlockIndexes(updatedQueryDimension,
+ segmentProperties.getDimensionOrdinalToBlockMapping(), expressionDimensions));
+ // setting all the measure chunk indexes to be read from file
+ blockExecutionInfo.setAllSelectedMeasureBlocksIndexes(QueryUtil
+ .getMeasureBlockIndexes(queryModel.getQueryMeasures(), expressionMeasures,
+ segmentProperties.getMeasuresOrdinalToBlockMapping()));
+ // setting the key structure info which will be required
+ // to update the older block key with new key generator
+ blockExecutionInfo.setKeyStructureInfo(queryProperties.keyStructureInfo);
+ // setting the size of fixed key column (dictionary column)
+ blockExecutionInfo.setFixedLengthKeySize(getKeySize(updatedQueryDimension, segmentProperties));
+ Set<Integer> dictionaryColumnBlockIndex = new HashSet<Integer>();
+ List<Integer> noDictionaryColumnBlockIndex = new ArrayList<Integer>();
+ // get the block index to be read from file for query dimension
+ // for both dictionary columns and no dictionary columns
+ QueryUtil.fillQueryDimensionsBlockIndexes(updatedQueryDimension,
+ segmentProperties.getDimensionOrdinalToBlockMapping(), dictionaryColumnBlockIndex,
+ noDictionaryColumnBlockIndex);
+ int[] queryDictionaruColumnBlockIndexes = ArrayUtils.toPrimitive(
+ dictionaryColumnBlockIndex.toArray(new Integer[dictionaryColumnBlockIndex.size()]));
+ // need to sort the dictionary column block indexes, as for all dimension
+ // columns the key will be filled based on key order
+ Arrays.sort(queryDictionaruColumnBlockIndexes);
+ blockExecutionInfo.setDictionaryColumnBlockIndex(queryDictionaruColumnBlockIndexes);
+ // setting the no dictionary column block indexes
+ blockExecutionInfo.setNoDictionaryBlockIndexes(ArrayUtils.toPrimitive(
+ noDictionaryColumnBlockIndex.toArray(new Integer[noDictionaryColumnBlockIndex.size()])));
+ // setting column id to dictionary mapping
+ blockExecutionInfo.setColumnIdToDcitionaryMapping(queryProperties.columnToDictionayMapping);
+ // setting each column value size
+ blockExecutionInfo.setEachColumnValueSize(segmentProperties.getEachDimColumnValueSize());
+ try {
+ // to set column group and its key structure info, which will be used
+ // for getting the column group column data in case of final row
+ // and in case of dimension aggregation
+ blockExecutionInfo.setColumnGroupToKeyStructureInfo(
+ QueryUtil.getColumnGroupKeyStructureInfo(updatedQueryDimension, segmentProperties));
+ } catch (KeyGenException e) {
+ throw new QueryExecutionException(e);
+ }
+ return blockExecutionInfo;
+ }
+
+ /**
+ * This method will be used to get the fixed key length size, which will be
+ * used to create a row from the column chunks
+ *
+ * @param queryDimension query dimension
+ * @param blockMetadataInfo block metadata info
+ * @return key size
+ */
+ private int getKeySize(List<QueryDimension> queryDimension, SegmentProperties blockMetadataInfo) {
+ List<Integer> fixedLengthDimensionOrdinal =
+ new ArrayList<Integer>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+ int counter = 0;
+ while (counter < queryDimension.size()) {
+ if (queryDimension.get(counter).getDimension().numberOfChild() > 0) {
+ counter += queryDimension.get(counter).getDimension().numberOfChild();
+ continue;
+ } else if (!CarbonUtil.hasEncoding(queryDimension.get(counter).getDimension().getEncoder(),
+ Encoding.DICTIONARY)) {
+ counter++;
+ } else {
+ fixedLengthDimensionOrdinal.add(queryDimension.get(counter).getDimension().getKeyOrdinal());
+ counter++;
+ }
+ }
+ int[] dictioanryColumnOrdinal = ArrayUtils.toPrimitive(
+ fixedLengthDimensionOrdinal.toArray(new Integer[fixedLengthDimensionOrdinal.size()]));
+ if (dictioanryColumnOrdinal.length > 0) {
+ return blockMetadataInfo.getFixedLengthKeySplitter()
+ .getKeySizeByBlock(dictioanryColumnOrdinal);
+ }
+ return 0;
+ }
+
+ /**
+ * Below method will be used to get the sort information which will be
+ * required while sorting the data on a dimension column
+ *
+ * @param queryModel query model
+ * @return sort infos
+ * @throws QueryExecutionException if any problem occurs while preparing the sort information
+ */
+ protected SortInfo getSortInfos(QueryModel queryModel) throws QueryExecutionException {
+
+ // get the masked byte range for the order by dimensions
+ int[][] maskedByteRangeForSorting = QueryUtil
+ .getMaskedByteRangeForSorting(queryModel.getSortDimension(),
+ queryProperties.keyStructureInfo.getKeyGenerator(),
+ queryProperties.keyStructureInfo.getMaskByteRanges());
+ // get masked key for sorting
+ byte[][] maksedKeyForSorting = QueryUtil.getMaksedKeyForSorting(queryModel.getSortDimension(),
+ queryProperties.keyStructureInfo.getKeyGenerator(), maskedByteRangeForSorting,
+ queryProperties.keyStructureInfo.getMaskByteRanges());
+ // fill sort dimension indexes
+ queryProperties.sortDimIndexes = QueryUtil
+ .getSortDimensionIndexes(queryModel.getSortDimension(), queryModel.getQueryDimension());
+ SortInfo sortInfos = new SortInfo();
+ sortInfos.setDimensionMaskKeyForSorting(maksedKeyForSorting);
+ sortInfos.setDimensionSortOrder(queryModel.getSortOrder());
+ sortInfos.setMaskedByteRangeForSorting(maskedByteRangeForSorting);
+ sortInfos.setSortDimensionIndex(queryProperties.sortDimIndexes);
+ sortInfos.setSortDimension(queryModel.getSortDimension());
+ return sortInfos;
+ }
+
+ /**
+ * Below method will be used to get the aggregator info for the query
+ *
+ * @param queryModel query model
+ * @param tableBlock table block
+ * @return aggregator info
+ */
+ private AggregatorInfo getAggregatorInfoForBlock(QueryModel queryModel,
+ AbstractIndex tableBlock) {
+ // getting the aggregate infos which will be used during aggregation
+ AggregatorInfo aggregatorInfos = RestructureUtil
+ .getAggregatorInfos(queryModel.getQueryMeasures(),
+ tableBlock.getSegmentProperties().getMeasures());
+ // setting the index of expression in measure aggregators
+ aggregatorInfos.setExpressionAggregatorStartIndex(queryProperties.aggExpressionStartIndex);
+ // setting the index of measure columns in measure aggregators
+ aggregatorInfos.setMeasureAggregatorStartIndex(queryProperties.measureStartIndex);
+ // setting the measure data types for all the measures selected
+ // in the query
+ aggregatorInfos.setMeasureDataTypes(queryProperties.measureDataTypes);
+ return aggregatorInfos;
+ }
+
+}
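
One small detail worth calling out from getBlockExecutionInfoForBlock above: the dictionary column block indexes are gathered into a Set, converted to a primitive int[] with commons-lang3 (already on the classpath per the imports), and sorted so that the fixed-length key can be filled in key order. A standalone sketch of just that step, with illustrative names:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import org.apache.commons.lang3.ArrayUtils;

// Standalone sketch: turn a set of block indexes into a sorted primitive
// array, as done for the dictionary column block indexes above.
public final class BlockIndexSketch {

  public static int[] toSortedPrimitive(Set<Integer> blockIndexes) {
    int[] indexes =
        ArrayUtils.toPrimitive(blockIndexes.toArray(new Integer[blockIndexes.size()]));
    Arrays.sort(indexes);
    return indexes;
  }

  public static void main(String[] args) {
    Set<Integer> blockIndexes = new HashSet<Integer>(Arrays.asList(3, 1, 2));
    System.out.println(Arrays.toString(toSortedPrimitive(blockIndexes))); // [1, 2, 3]
  }
}
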
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/1c725f5b/core/src/main/java/org/carbondata/scan/executor/impl/DetailQueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/impl/DetailQueryExecutor.java b/core/src/main/java/org/carbondata/scan/executor/impl/DetailQueryExecutor.java
new file mode 100644
index 0000000..8232567
--- /dev/null
+++ b/core/src/main/java/org/carbondata/scan/executor/impl/DetailQueryExecutor.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.carbondata.scan.executor.impl;
+
+import java.util.List;
+
+import org.carbondata.core.iterator.CarbonIterator;
+import org.carbondata.scan.executor.exception.QueryExecutionException;
+import org.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.carbondata.scan.model.QueryModel;
+import org.carbondata.scan.result.iterator.ChunkRowIterator;
+import org.carbondata.scan.result.iterator.DetailQueryResultIterator;
+import org.carbondata.scan.result.preparator.impl.DetailQueryResultPreparatorImpl;
+
+/**
+ * Below class will be used to execute the detail query.
+ * For executing the detail query it will pass all the block execution
+ * infos to the detail query result iterator, and that iterator is returned
+ */
+public class DetailQueryExecutor extends AbstractQueryExecutor {
+
+ @Override public CarbonIterator<Object[]> execute(QueryModel queryModel)
+ throws QueryExecutionException {
+ List<BlockExecutionInfo> blockExecutionInfoList = getBlockExecutionInfos(queryModel);
+ return new ChunkRowIterator(
+ new DetailQueryResultIterator(blockExecutionInfoList, queryModel,
+ new DetailQueryResultPreparatorImpl(queryProperties, queryModel)));
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/1c725f5b/core/src/main/java/org/carbondata/scan/executor/impl/DetailRawRecordQueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/impl/DetailRawRecordQueryExecutor.java b/core/src/main/java/org/carbondata/scan/executor/impl/DetailRawRecordQueryExecutor.java
new file mode 100644
index 0000000..1ce0a36
--- /dev/null
+++ b/core/src/main/java/org/carbondata/scan/executor/impl/DetailRawRecordQueryExecutor.java
@@ -0,0 +1,24 @@
+package org.carbondata.scan.executor.impl;
+
+import java.util.List;
+
+import org.carbondata.core.iterator.CarbonIterator;
+import org.carbondata.scan.executor.exception.QueryExecutionException;
+import org.carbondata.scan.executor.infos.BlockExecutionInfo;
+import org.carbondata.scan.model.QueryModel;
+import org.carbondata.scan.result.BatchResult;
+import org.carbondata.scan.result.iterator.DetailQueryResultIterator;
+import org.carbondata.scan.result.preparator.impl.RawQueryResultPreparatorImpl;
+
+/**
+ * Executor for raw records; it does not decode the raw bytes into actual values
+ */
+public class DetailRawRecordQueryExecutor extends AbstractQueryExecutor<BatchResult> {
+
+ @Override public CarbonIterator<BatchResult> execute(QueryModel queryModel)
+ throws QueryExecutionException {
+ List<BlockExecutionInfo> blockExecutionInfoList = getBlockExecutionInfos(queryModel);
+ return new DetailQueryResultIterator(blockExecutionInfoList, queryModel,
+ new RawQueryResultPreparatorImpl(queryProperties, queryModel));
+ }
+}
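
For contrast with the decoded path above, a similarly hedged sketch of the raw-record flow; the exact BatchResult accessors are not part of this patch, so the batch is only handed on without being inspected.

    // Hypothetical raw-record driver: rows stay in their raw, undecoded form.
    DetailRawRecordQueryExecutor rawExecutor = new DetailRawRecordQueryExecutor();
    CarbonIterator<BatchResult> batches = rawExecutor.execute(queryModel);
    while (batches.hasNext()) {
      BatchResult batch = batches.next();  // no dictionary decode is applied
      // pass the batch to a writer / serializer ...
    }
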
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/1c725f5b/core/src/main/java/org/carbondata/scan/executor/impl/QueryExecutorProperties.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/impl/QueryExecutorProperties.java b/core/src/main/java/org/carbondata/scan/executor/impl/QueryExecutorProperties.java
new file mode 100644
index 0000000..a004dce
--- /dev/null
+++ b/core/src/main/java/org/carbondata/scan/executor/impl/QueryExecutorProperties.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.carbondata.scan.executor.impl;
+
+import java.util.List;
+import java.util.Map;
+
+import org.carbondata.core.cache.dictionary.Dictionary;
+import org.carbondata.core.carbon.datastore.block.AbstractIndex;
+import org.carbondata.core.carbon.metadata.datatype.DataType;
+import org.carbondata.scan.executor.infos.KeyStructureInfo;
+
+/**
+ * Holds all the properties required for query execution
+ */
+public class QueryExecutorProperties {
+
+ /**
+ * list of blocks on which the query will be executed
+ */
+ protected List<AbstractIndex> dataBlocks;
+
+ /**
+ * holds the information required for updating the older block
+ * dictionary key
+ */
+ public KeyStructureInfo keyStructureInfo;
+
+ /**
+ * as we have multiple types of column aggregation, like
+ * dimension, expression and measure, this will be used for getting the
+ * measure aggregation start index
+ */
+ public int measureStartIndex;
+
+ /**
+ * queries like count(1), count(*), etc. will use this parameter
+ */
+ public boolean isFunctionQuery;
+
+ /**
+ * start index of the aggregation expressions in the aggregator array
+ */
+ public int aggExpressionStartIndex;
+
+ /**
+ * indexes of the dimensions which are present in the order by
+ * clause of a query
+ */
+ public byte[] sortDimIndexes;
+
+ /**
+ * mapping of each dictionary dimension column to its loaded dictionary,
+ * used to decode dictionary values
+ */
+ public Map<String, Dictionary> columnToDictionayMapping;
+
+ /**
+ * Measure datatypes
+ */
+ public DataType[] measureDataTypes;
+}
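
To indicate how an executor subclass might consult these properties, a small hedged sketch follows; columnId and surrogateKey are placeholder values, and getDictionaryValueForKey is assumed to be the Dictionary lookup for a surrogate key.

    // Hypothetical lookup inside an executor, assuming queryProperties was
    // populated during executor initialization.
    Dictionary dictionary = queryProperties.columnToDictionayMapping.get(columnId);
    String actualValue = dictionary.getDictionaryValueForKey(surrogateKey);  // assumed API
    DataType firstMeasureType = queryProperties.measureDataTypes[0];
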
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/1c725f5b/core/src/main/java/org/carbondata/scan/executor/infos/AggregatorInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/infos/AggregatorInfo.java b/core/src/main/java/org/carbondata/scan/executor/infos/AggregatorInfo.java
new file mode 100644
index 0000000..2c163e1
--- /dev/null
+++ b/core/src/main/java/org/carbondata/scan/executor/infos/AggregatorInfo.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.carbondata.scan.executor.infos;
+
+import org.carbondata.core.carbon.metadata.datatype.DataType;
+
+/**
+ * Info class which stores all the details
+ * which are required during aggregation
+ */
+public class AggregatorInfo {
+
+ /**
+ * ordinals of the selected query measures,
+ * which will be used to read the measures chunk data;
+ * each entry stores the index of the measure within the measures chunk
+ */
+ private int[] measureOrdinals;
+
+ /**
+ * This parameter will be used to
+ * check whether a particular measure is present
+ * in the table block; if not, its default value will be used
+ */
+ private boolean[] measureExists;
+
+ /**
+ * these default values will be used when a measure is not present
+ * in the table block; after a restructure of the table that adds a
+ * measure, older blocks will not contain that measure, so its default
+ * value will be used while aggregating over those older blocks
+ */
+ private Object[] defaultValues;
+
+ /**
+ * In carbon there are three types of aggregation
+ * (dimension aggregation, expression aggregation and measure aggregation).
+ * This index marks the start position of the expression aggregators
+ * in the aggregator array
+ */
+ private int expressionAggregatorStartIndex;
+
+ /**
+ * In carbon there are three types of aggregation
+ * (dimension aggregation, expression aggregation and measure aggregation).
+ * This index marks the start position of the measure aggregators
+ * in the aggregator array
+ */
+ private int measureAggregatorStartIndex;
+
+ /**
+ * Data type of each measure
+ */
+ private DataType[] measureDataTypes;
+
+ /**
+ * @return the measureOrdinal
+ */
+ public int[] getMeasureOrdinals() {
+ return measureOrdinals;
+ }
+
+ /**
+ * @param measureOrdinal the measureOrdinal to set
+ */
+ public void setMeasureOrdinals(int[] measureOrdinal) {
+ this.measureOrdinals = measureOrdinal;
+ }
+
+ /**
+ * @return the measureExists
+ */
+ public boolean[] getMeasureExists() {
+ return measureExists;
+ }
+
+ /**
+ * @param measureExists the measureExists to set
+ */
+ public void setMeasureExists(boolean[] measureExists) {
+ this.measureExists = measureExists;
+ }
+
+ /**
+ * @return the defaultValues
+ */
+ public Object[] getDefaultValues() {
+ return defaultValues;
+ }
+
+ /**
+ * @param defaultValues the defaultValues to set
+ */
+ public void setDefaultValues(Object[] defaultValues) {
+ this.defaultValues = defaultValues;
+ }
+
+ /**
+ * @return the expressionAggregatorStartIndex
+ */
+ public int getExpressionAggregatorStartIndex() {
+ return expressionAggregatorStartIndex;
+ }
+
+ /**
+ * @param expressionAggregatorStartIndex the expressionAggregatorStartIndex to set
+ */
+ public void setExpressionAggregatorStartIndex(int expressionAggregatorStartIndex) {
+ this.expressionAggregatorStartIndex = expressionAggregatorStartIndex;
+ }
+
+ /**
+ * @return the measureAggregatorStartIndex
+ */
+ public int getMeasureAggregatorStartIndex() {
+ return measureAggregatorStartIndex;
+ }
+
+ /**
+ * @param measureAggregatorStartIndex the measureAggregatorStartIndex to set
+ */
+ public void setMeasureAggregatorStartIndex(int measureAggregatorStartIndex) {
+ this.measureAggregatorStartIndex = measureAggregatorStartIndex;
+ }
+
+ public DataType[] getMeasureDataTypes() {
+ return measureDataTypes;
+ }
+
+ public void setMeasureDataTypes(DataType[] measureDataTypes) {
+ this.measureDataTypes = measureDataTypes;
+ }
+}
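
To make the restructure scenario described above concrete, here is an editorial sketch of how a scanner could consult measureExists and defaultValues; info is an AggregatorInfo instance and readMeasure is a hypothetical helper standing in for whatever actually reads a measure value from the chunk.

    // Illustrative only: substitute the default value for measures that are
    // missing from an older block (for example, added after a restructure).
    Object[] measureValues = new Object[info.getMeasureOrdinals().length];
    for (int i = 0; i < measureValues.length; i++) {
      if (info.getMeasureExists()[i]) {
        measureValues[i] = readMeasure(info.getMeasureOrdinals()[i]); // hypothetical reader
      } else {
        measureValues[i] = info.getDefaultValues()[i];
      }
    }
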
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/1c725f5b/core/src/main/java/org/carbondata/scan/executor/infos/BlockExecutionInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/infos/BlockExecutionInfo.java b/core/src/main/java/org/carbondata/scan/executor/infos/BlockExecutionInfo.java
new file mode 100644
index 0000000..dc55e46
--- /dev/null
+++ b/core/src/main/java/org/carbondata/scan/executor/infos/BlockExecutionInfo.java
@@ -0,0 +1,611 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.carbondata.scan.executor.infos;
+
+import java.util.Map;
+
+import org.carbondata.core.cache.dictionary.Dictionary;
+import org.carbondata.core.carbon.datastore.DataRefNode;
+import org.carbondata.core.carbon.datastore.IndexKey;
+import org.carbondata.core.carbon.datastore.block.AbstractIndex;
+import org.carbondata.core.datastorage.store.impl.FileFactory.FileType;
+import org.carbondata.core.keygenerator.KeyGenerator;
+import org.carbondata.scan.filter.executer.FilterExecuter;
+
+/**
+ * Below class will have all the properties which are needed during query
+ * execution for one block
+ */
+public class BlockExecutionInfo {
+
+ /**
+ * block on which query will be executed
+ */
+ private AbstractIndex blockIndex;
+
+ /**
+ * each segment's key size can be different, and in that case the fixed key
+ * needs to be updated with the latest segment key generator. So this property
+ * will tell whether the update is required or not; if the key size is the
+ * same it is not required
+ */
+ private boolean isFixedKeyUpdateRequired;
+
+ /**
+ * in case of a detail + order by query, when the number of output records is
+ * large the data needs to be written to disk, so this flag tells
+ * whether we can write to the disk or not
+ */
+ private boolean isFileBasedQuery;
+
+ /**
+ * id of the query. this will be used to create directory while writing the
+ * data file in case of detail+order by query
+ */
+ private String queryId;
+
+ /**
+ * this handles limit queries: for detail queries the limit is pushed down
+ * to the executor level, so based on the limit value we can
+ * process only that many records
+ */
+ private int limit;
+
+ /**
+ * stores all the information required for aggregation during query
+ * execution
+ */
+ private AggregatorInfo aggregatorInfo;
+
+ /**
+ * this will be used to get the first tentative block from which query
+ * execution starts; this is useful in case of a filter query to get the
+ * start block based on the filter values
+ */
+ private IndexKey startKey;
+
+ /**
+ * this will be used to get the last tentative block till which scanning
+ * will be done; this is useful in case of a filter query to get the last
+ * block based on the filter values
+ */
+ private IndexKey endKey;
+
+ /**
+ * masked bytes for the block, which will be used to unpack the fixed length
+ * key; this will be used for updating the older block key with the new block
+ * key generator
+ */
+ private int[] maskedByteForBlock;
+
+ /**
+ * flag to check whether query is detail query or aggregation query
+ */
+ private boolean isDetailQuery;
+
+ /**
+ * total number of dimension blocks in the table block
+ */
+ private int totalNumberDimensionBlock;
+
+ /**
+ * total number of measure in block
+ */
+ private int totalNumberOfMeasureBlock;
+
+ /**
+ * will be used to read the dimension block from file
+ */
+ private int[] allSelectedDimensionBlocksIndexes;
+
+ /**
+ * will be used to read the measure block from file
+ */
+ private int[] allSelectedMeasureBlocksIndexes;
+
+ /**
+ * this will be used to update the older block fixed length keys with the
+ * new block fixed length key
+ */
+ private KeyStructureInfo keyStructureInfo;
+
+ /**
+ * holds the information used to sort the result data
+ */
+ private SortInfo sortInfo;
+
+ /**
+ * first block from which query execution will start
+ */
+ private DataRefNode firstDataBlock;
+
+ /**
+ * number of blocks to be scanned by the query
+ */
+ private long numberOfBlockToScan;
+
+ /**
+ * key size of the fixed length dimension column
+ */
+ private int fixedLengthKeySize;
+
+ /**
+ * dictionary column block indexes based on query
+ */
+ private int[] dictionaryColumnBlockIndex;
+ /**
+ * no dictionary column block indexes based on the query order
+ */
+ private int[] noDictionaryBlockIndexes;
+
+ /**
+ * key generator used for generating the table block fixed length key
+ */
+ private KeyGenerator blockKeyGenerator;
+
+ /**
+ * each column value size
+ */
+ private int[] eachColumnValueSize;
+
+ /**
+ * partition number
+ */
+ private String partitionId;
+
+ /**
+ * column group block index in file to key structure info mapping
+ */
+ private Map<Integer, KeyStructureInfo> columnGroupToKeyStructureInfo;
+
+ /**
+ * mapping of each dictionary dimension to its dictionary, which will be
+ * used to get the actual data from the dictionary for aggregation and sorting
+ */
+ private Map<String, Dictionary> columnIdToDcitionaryMapping;
+
+ /**
+ * filter tree to execute the filter
+ */
+ private FilterExecuter filterExecuterTree;
+
+ /**
+ * fileType
+ */
+ private FileType fileType;
+
+ /**
+ * whether only raw byte records are needed, without aggregation.
+ */
+ private boolean isRawRecordDetailQuery;
+
+ /**
+ * whether dimensions exist in query.
+ */
+ private boolean isDimensionsExistInQuery;
+
+ /**
+ * @return the tableBlock
+ */
+ public AbstractIndex getDataBlock() {
+ return blockIndex;
+ }
+
+ /**
+ * @param blockIndex the tableBlock to set
+ */
+ public void setDataBlock(AbstractIndex blockIndex) {
+ this.blockIndex = blockIndex;
+ }
+
+ /**
+ * @return the isFixedKeyUpdateRequired
+ */
+ public boolean isFixedKeyUpdateRequired() {
+ return isFixedKeyUpdateRequired;
+ }
+
+ /**
+ * @param isFixedKeyUpdateRequired the isFixedKeyUpdateRequired to set
+ */
+ public void setFixedKeyUpdateRequired(boolean isFixedKeyUpdateRequired) {
+ this.isFixedKeyUpdateRequired = isFixedKeyUpdateRequired;
+ }
+
+ /**
+ * @return the isFileBasedQuery
+ */
+ public boolean isFileBasedQuery() {
+ return isFileBasedQuery;
+ }
+
+ /**
+ * @param isFileBasedQuery the isFileBasedQuery to set
+ */
+ public void setFileBasedQuery(boolean isFileBasedQuery) {
+ this.isFileBasedQuery = isFileBasedQuery;
+ }
+
+ /**
+ * @return the queryId
+ */
+ public String getQueryId() {
+ return queryId;
+ }
+
+ /**
+ * @param queryId the queryId to set
+ */
+ public void setQueryId(String queryId) {
+ this.queryId = queryId;
+ }
+
+ /**
+ * @return the limit
+ */
+ public int getLimit() {
+ return limit;
+ }
+
+ /**
+ * @param limit the limit to set
+ */
+ public void setLimit(int limit) {
+ this.limit = limit;
+ }
+
+ /**
+ * @return the aggregatorInfos
+ */
+ public AggregatorInfo getAggregatorInfo() {
+ return aggregatorInfo;
+ }
+
+ /**
+ * @param aggregatorInfo the aggregatorInfos to set
+ */
+ public void setAggregatorInfo(AggregatorInfo aggregatorInfo) {
+ this.aggregatorInfo = aggregatorInfo;
+ }
+
+ /**
+ * @return the startKey
+ */
+ public IndexKey getStartKey() {
+ return startKey;
+ }
+
+ /**
+ * @param startKey the startKey to set
+ */
+ public void setStartKey(IndexKey startKey) {
+ this.startKey = startKey;
+ }
+
+ /**
+ * @return the endKey
+ */
+ public IndexKey getEndKey() {
+ return endKey;
+ }
+
+ /**
+ * @param endKey the endKey to set
+ */
+ public void setEndKey(IndexKey endKey) {
+ this.endKey = endKey;
+ }
+
+ /**
+ * @return the maskedByteForBlock
+ */
+ public int[] getMaskedByteForBlock() {
+ return maskedByteForBlock;
+ }
+
+ /**
+ * @param maskedByteForBlock the maskedByteForBlock to set
+ */
+ public void setMaskedByteForBlock(int[] maskedByteForBlock) {
+ this.maskedByteForBlock = maskedByteForBlock;
+ }
+
+ /**
+ * @return the isDetailQuery
+ */
+ public boolean isDetailQuery() {
+ return isDetailQuery;
+ }
+
+ /**
+ * @param isDetailQuery the isDetailQuery to set
+ */
+ public void setDetailQuery(boolean isDetailQuery) {
+ this.isDetailQuery = isDetailQuery;
+ }
+
+ /**
+ * @return the totalNumberDimensionBlock
+ */
+ public int getTotalNumberDimensionBlock() {
+ return totalNumberDimensionBlock;
+ }
+
+ /**
+ * @param totalNumberDimensionBlock the totalNumberDimensionBlock to set
+ */
+ public void setTotalNumberDimensionBlock(int totalNumberDimensionBlock) {
+ this.totalNumberDimensionBlock = totalNumberDimensionBlock;
+ }
+
+ /**
+ * @return the totalNumberOfMeasureBlock
+ */
+ public int getTotalNumberOfMeasureBlock() {
+ return totalNumberOfMeasureBlock;
+ }
+
+ /**
+ * @param totalNumberOfMeasureBlock the totalNumberOfMeasureBlock to set
+ */
+ public void setTotalNumberOfMeasureBlock(int totalNumberOfMeasureBlock) {
+ this.totalNumberOfMeasureBlock = totalNumberOfMeasureBlock;
+ }
+
+ /**
+ * @return the allSelectedDimensionBlocksIndexes
+ */
+ public int[] getAllSelectedDimensionBlocksIndexes() {
+ return allSelectedDimensionBlocksIndexes;
+ }
+
+ /**
+ * @param allSelectedDimensionBlocksIndexes the allSelectedDimensionBlocksIndexes to set
+ */
+ public void setAllSelectedDimensionBlocksIndexes(int[] allSelectedDimensionBlocksIndexes) {
+ this.allSelectedDimensionBlocksIndexes = allSelectedDimensionBlocksIndexes;
+ }
+
+ /**
+ * @return the allSelectedMeasureBlocksIndexes
+ */
+ public int[] getAllSelectedMeasureBlocksIndexes() {
+ return allSelectedMeasureBlocksIndexes;
+ }
+
+ /**
+ * @param allSelectedMeasureBlocksIndexes the allSelectedMeasureBlocksIndexes to set
+ */
+ public void setAllSelectedMeasureBlocksIndexes(int[] allSelectedMeasureBlocksIndexes) {
+ this.allSelectedMeasureBlocksIndexes = allSelectedMeasureBlocksIndexes;
+ }
+
+ /**
+ * @return the restructureInfos
+ */
+ public KeyStructureInfo getKeyStructureInfo() {
+ return keyStructureInfo;
+ }
+
+ /**
+ * @param keyStructureInfo the restructureInfos to set
+ */
+ public void setKeyStructureInfo(KeyStructureInfo keyStructureInfo) {
+ this.keyStructureInfo = keyStructureInfo;
+ }
+
+ /**
+ * @return the sortInfos
+ */
+ public SortInfo getSortInfo() {
+ return sortInfo;
+ }
+
+ /**
+ * @param sortInfo the sortInfos to set
+ */
+ public void setSortInfo(SortInfo sortInfo) {
+ this.sortInfo = sortInfo;
+ }
+
+ /**
+ * @return the firstDataBlock
+ */
+ public DataRefNode getFirstDataBlock() {
+ return firstDataBlock;
+ }
+
+ /**
+ * @param firstDataBlock the firstDataBlock to set
+ */
+ public void setFirstDataBlock(DataRefNode firstDataBlock) {
+ this.firstDataBlock = firstDataBlock;
+ }
+
+ /**
+ * @return the numberOfBlockToScan
+ */
+ public long getNumberOfBlockToScan() {
+ return numberOfBlockToScan;
+ }
+
+ /**
+ * @param numberOfBlockToScan the numberOfBlockToScan to set
+ */
+ public void setNumberOfBlockToScan(long numberOfBlockToScan) {
+ this.numberOfBlockToScan = numberOfBlockToScan;
+ }
+
+ /**
+ * @return the fixedLengthKeySize
+ */
+ public int getFixedLengthKeySize() {
+ return fixedLengthKeySize;
+ }
+
+ /**
+ * @param fixedLengthKeySize the fixedLengthKeySize to set
+ */
+ public void setFixedLengthKeySize(int fixedLengthKeySize) {
+ this.fixedLengthKeySize = fixedLengthKeySize;
+ }
+
+ /**
+ * @return the filterEvaluatorTree
+ */
+ public FilterExecuter getFilterExecuterTree() {
+ return filterExecuterTree;
+ }
+
+ /**
+ * @param filterExecuterTree the filterEvaluatorTree to set
+ */
+ public void setFilterExecuterTree(FilterExecuter filterExecuterTree) {
+ this.filterExecuterTree = filterExecuterTree;
+ }
+
+ /**
+ * @return the tableBlockKeyGenerator
+ */
+ public KeyGenerator getBlockKeyGenerator() {
+ return blockKeyGenerator;
+ }
+
+ /**
+ * @param tableBlockKeyGenerator the tableBlockKeyGenerator to set
+ */
+ public void setBlockKeyGenerator(KeyGenerator tableBlockKeyGenerator) {
+ this.blockKeyGenerator = tableBlockKeyGenerator;
+ }
+
+ /**
+ * @return the eachColumnValueSize
+ */
+ public int[] getEachColumnValueSize() {
+ return eachColumnValueSize;
+ }
+
+ /**
+ * @param eachColumnValueSize the eachColumnValueSize to set
+ */
+ public void setEachColumnValueSize(int[] eachColumnValueSize) {
+ this.eachColumnValueSize = eachColumnValueSize;
+ }
+
+ /**
+ * @return the partitionId
+ */
+ public String getPartitionId() {
+ return partitionId;
+ }
+
+ /**
+ * @param partitionId the partitionId to set
+ */
+ public void setPartitionId(String partitionId) {
+ this.partitionId = partitionId;
+ }
+
+ /**
+ * @return the dictionaryColumnBlockIndex
+ */
+ public int[] getDictionaryColumnBlockIndex() {
+ return dictionaryColumnBlockIndex;
+ }
+
+ /**
+ * @param dictionaryColumnBlockIndex the dictionaryColumnBlockIndex to set
+ */
+ public void setDictionaryColumnBlockIndex(int[] dictionaryColumnBlockIndex) {
+ this.dictionaryColumnBlockIndex = dictionaryColumnBlockIndex;
+ }
+
+ /**
+ * @return the noDictionaryBlockIndexes
+ */
+ public int[] getNoDictionaryBlockIndexes() {
+ return noDictionaryBlockIndexes;
+ }
+
+ /**
+ * @param noDictionaryBlockIndexes the noDictionaryBlockIndexes to set
+ */
+ public void setNoDictionaryBlockIndexes(int[] noDictionaryBlockIndexes) {
+ this.noDictionaryBlockIndexes = noDictionaryBlockIndexes;
+ }
+
+ /**
+ * @return the columnGroupToKeyStructureInfo
+ */
+ public Map<Integer, KeyStructureInfo> getColumnGroupToKeyStructureInfo() {
+ return columnGroupToKeyStructureInfo;
+ }
+
+ /**
+ * @param columnGroupToKeyStructureInfo the columnGroupToKeyStructureInfo to set
+ */
+ public void setColumnGroupToKeyStructureInfo(
+ Map<Integer, KeyStructureInfo> columnGroupToKeyStructureInfo) {
+ this.columnGroupToKeyStructureInfo = columnGroupToKeyStructureInfo;
+ }
+
+ /**
+ * @return the columnIdToDcitionaryMapping
+ */
+ public Map<String, Dictionary> getColumnIdToDcitionaryMapping() {
+ return columnIdToDcitionaryMapping;
+ }
+
+ /**
+ * @param columnIdToDcitionaryMapping the columnIdToDcitionaryMapping to set
+ */
+ public void setColumnIdToDcitionaryMapping(Map<String, Dictionary> columnIdToDcitionaryMapping) {
+ this.columnIdToDcitionaryMapping = columnIdToDcitionaryMapping;
+ }
+
+ /**
+ * @return the fileType
+ */
+ public FileType getFileType() {
+ return fileType;
+ }
+
+ /**
+ * @param fileType the fileType to set
+ */
+ public void setFileType(FileType fileType) {
+ this.fileType = fileType;
+ }
+
+ public boolean isRawRecordDetailQuery() {
+ return isRawRecordDetailQuery;
+ }
+
+ public void setRawRecordDetailQuery(boolean rawRecordDetailQuery) {
+ isRawRecordDetailQuery = rawRecordDetailQuery;
+ }
+
+ public boolean isDimensionsExistInQuery() {
+ return isDimensionsExistInQuery;
+ }
+
+ public void setDimensionsExistInQuery(boolean dimensionsExistInQuery) {
+ isDimensionsExistInQuery = dimensionsExistInQuery;
+ }
+}
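
The class itself is a plain holder, but to hint at the per-block wiring AbstractQueryExecutor performs, here is a hedged, abbreviated sketch that uses only setters defined in this file; tableBlock, startIndexKey, endIndexKey and aggregatorInfo are placeholders, and the limit accessor on QueryModel is an assumption.

    // Hypothetical per-block setup, normally derived from the QueryModel.
    BlockExecutionInfo info = new BlockExecutionInfo();
    info.setDataBlock(tableBlock);           // AbstractIndex of the block to scan
    info.setStartKey(startIndexKey);         // tentative scan start (filter queries)
    info.setEndKey(endIndexKey);             // tentative scan end
    info.setLimit(queryModel.getLimit());    // assumed QueryModel accessor
    info.setAggregatorInfo(aggregatorInfo);
    info.setNumberOfBlockToScan(1);
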
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/1c725f5b/core/src/main/java/org/carbondata/scan/executor/infos/KeyStructureInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/scan/executor/infos/KeyStructureInfo.java b/core/src/main/java/org/carbondata/scan/executor/infos/KeyStructureInfo.java
new file mode 100644
index 0000000..51874b4
--- /dev/null
+++ b/core/src/main/java/org/carbondata/scan/executor/infos/KeyStructureInfo.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.carbondata.scan.executor.infos;
+
+import org.carbondata.core.keygenerator.KeyGenerator;
+
+/**
+ * Below class will store the structure of the key
+ * used during query execution
+ */
+public class KeyStructureInfo {
+
+ /**
+ * key generator of the latest table block;
+ * this key generator will be used
+ * to update the mdkey of the older slice to match the new slice
+ */
+ private KeyGenerator keyGenerator;
+
+ /**
+ * mask byte ranges for the query
+ */
+ private int[] maskByteRanges;
+
+ /**
+ * masked bytes of the query
+ */
+ private int[] maskedBytes;
+
+ /**
+ * max key for query execution
+ */
+ private byte[] maxKey;
+
+ /**
+ * mdkey start index of block
+ */
+ private int blockMdKeyStartOffset;
+
+ /**
+ * @return the keyGenerator
+ */
+ public KeyGenerator getKeyGenerator() {
+ return keyGenerator;
+ }
+
+ /**
+ * @param keyGenerator the keyGenerator to set
+ */
+ public void setKeyGenerator(KeyGenerator keyGenerator) {
+ this.keyGenerator = keyGenerator;
+ }
+
+ /**
+ * @return the maskByteRanges
+ */
+ public int[] getMaskByteRanges() {
+ return maskByteRanges;
+ }
+
+ /**
+ * @param maskByteRanges the maskByteRanges to set
+ */
+ public void setMaskByteRanges(int[] maskByteRanges) {
+ this.maskByteRanges = maskByteRanges;
+ }
+
+ /**
+ * @return the maskedBytes
+ */
+ public int[] getMaskedBytes() {
+ return maskedBytes;
+ }
+
+ /**
+ * @param maskedBytes the maskedBytes to set
+ */
+ public void setMaskedBytes(int[] maskedBytes) {
+ this.maskedBytes = maskedBytes;
+ }
+
+ /**
+ * @return the maxKey
+ */
+ public byte[] getMaxKey() {
+ return maxKey;
+ }
+
+ /**
+ * @param maxKey the maxKey to set
+ */
+ public void setMaxKey(byte[] maxKey) {
+ this.maxKey = maxKey;
+ }
+
+ /**
+ * @param startOffset
+ */
+ public void setBlockMdKeyStartOffset(int startOffset) {
+ this.blockMdKeyStartOffset = startOffset;
+ }
+
+ /**
+ * @return
+ */
+ public int getBlockMdKeyStartOffset() {
+ return this.blockMdKeyStartOffset;
+ }
+}
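
Finally, an editorial sketch of what the mask information in this class is typically used for: reducing a full fixed-length mdkey to just the byte ranges the query cares about. This is an illustrative routine written for this note, not the project's actual masking code.

    // Illustrative masking: copy only the byte ranges relevant to the query and
    // AND them with the max key, yielding the masked key used for comparisons.
    public static byte[] maskKey(byte[] fullKey, KeyStructureInfo structure) {
      int[] ranges = structure.getMaskByteRanges();
      byte[] maxKey = structure.getMaxKey();
      byte[] masked = new byte[ranges.length];
      for (int i = 0; i < ranges.length; i++) {
        masked[i] = (byte) (fullKey[ranges[i]] & maxKey[ranges[i]]);
      }
      return masked;
    }
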