Posted to commits@pinot.apache.org by xi...@apache.org on 2019/03/19 05:41:46 UTC

[incubator-pinot] branch nested-object-indexing-1 created (now bdaa9c1)

This is an automated email from the ASF dual-hosted git repository.

xiangfu pushed a change to branch nested-object-indexing-1
in repository https://gitbox.apache.org/repos/asf/incubator-pinot.git.


      at bdaa9c1  Fixing license header

This branch includes the following new commits:

     new a1d2d69  Adding support for MATCHES Predicate
     new 91257aa  Enhancing PQL to support MATCHES predicate, can be used for searching within text, map, json and other complex objects
     new 74f8e6e  Adding support for Object Type
     new 224299d  Wiring up end to end to support indexing nested fields on complex objects
     new a230c64  Adding support for bytes type in realtime + nested object indexing
     new bdaa9c1  Fixing license header

The 6 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
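
For context, the MATCHES predicate introduced by these commits takes a
Lucene query string plus an options string. Adapted from the integration
test in commit 04/06, a PQL query against a complex "headers" column looks
roughly like this:

    Select count(*) from myTable WHERE headers matches('k1:"value-1"', '')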



---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@pinot.apache.org
For additional commands, e-mail: commits-help@pinot.apache.org


[incubator-pinot] 04/06: Wiring up end to end to support indexing nested fields on complex objects

Posted by xi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xiangfu pushed a commit to branch nested-object-indexing-1
in repository https://gitbox.apache.org/repos/asf/incubator-pinot.git

commit 224299deccf308ddfdab2e59556d413944596329
Author: kishore gopalakrishna <g....@gmail.com>
AuthorDate: Thu Jan 24 19:53:29 2019 -0800

    Wiring up end to end to support indexing nested fields on complex objects
---
 .../org/apache/pinot/common/data/FieldSpec.java    |   3 +
 .../pinot/common/data/objects/TextObject.java      |   3 +-
 pinot-core/pom.xml                                 |  19 ++-
 .../immutable/ImmutableSegmentLoader.java          |   4 +-
 .../io/reader/impl/v1/SortedIndexReaderImpl.java   |   6 +
 .../core/operator/filter/FilterOperatorUtils.java  |   8 +-
 .../filter/IndexBasedMatchesFilterOperator.java    |  86 +++++++++++
 .../filter/ScanBasedMatchesFilterOperator.java     |  33 ++--
 .../MatchesPredicateEvaluatorFactory.java          |  21 +--
 .../invertedindex/RealtimeInvertedIndexReader.java |   8 +-
 .../core/segment/creator/InvertedIndexCreator.java |   8 +
 .../core/segment/creator/impl/V1Constants.java     |   1 +
 .../creator/impl/inv/LuceneIndexCreator.java       | 127 +++++++++++++++
 .../inv/OffHeapBitmapInvertedIndexCreator.java     |   6 +
 .../impl/inv/OnHeapBitmapInvertedIndexCreator.java |   6 +
 .../index/column/PhysicalColumnIndexContainer.java |  22 ++-
 .../index/data/source/ColumnDataSource.java        |   2 +-
 .../loader/invertedindex/InvertedIndexHandler.java | 106 ++++++++++++-
 .../index/readers/BitmapInvertedIndexReader.java   |   7 +
 .../segment/index/readers/InvertedIndexReader.java |   8 +
 .../index/readers/LuceneInvertedIndexReader.java   | 157 +++++++++++++++++++
 .../virtualcolumn/DocIdVirtualColumnProvider.java  |   7 +-
 .../SingleStringVirtualColumnProvider.java         |   7 +-
 .../tests/LuceneIndexClusterIntegrationTest.java   | 172 +++++++++++++++++++++
 pinot-perf/pom.xml                                 |  35 ++++-
 .../org/apache/pinot/perf/LuceneBenchmark.java     |  80 ++++++++++
 .../apache/pinot/tools/perf/ZookeeperLauncher.java |   2 +-
 27 files changed, 887 insertions(+), 57 deletions(-)

diff --git a/pinot-common/src/main/java/org/apache/pinot/common/data/FieldSpec.java b/pinot-common/src/main/java/org/apache/pinot/common/data/FieldSpec.java
index 080f0e7..ad5a5a3 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/data/FieldSpec.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/data/FieldSpec.java
@@ -335,6 +335,9 @@ public abstract class FieldSpec implements Comparable<FieldSpec>, ConfigNodeLife
       case STRING:
         jsonSchema.set("type", convertStringsToJsonArray("null", "string"));
         return jsonSchema;
+      case BYTES:
+        jsonSchema.set("type", convertStringsToJsonArray("null", "bytes"));
+        return jsonSchema;
       default:
         throw new UnsupportedOperationException();
     }
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/data/objects/TextObject.java b/pinot-common/src/main/java/org/apache/pinot/common/data/objects/TextObject.java
index cf5f27e..c171c40 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/data/objects/TextObject.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/data/objects/TextObject.java
@@ -27,7 +27,8 @@ import com.google.common.collect.Lists;
 public class TextObject implements PinotObject {
 
   byte[] _bytes;
-  private static List<String> _FIELDS = Lists.newArrayList("Content");
+  public static String DEFAULT_FIELD = "Content";
+  private static List<String> _FIELDS = Lists.newArrayList(DEFAULT_FIELD);
 
   @Override
   public void init(byte[] bytes) {
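
Note: the new DEFAULT_FIELD constant ("Content") is the default search field
for match queries against TEXT objects. A minimal sketch, assuming the
classic Lucene QueryParser as used by LuceneInvertedIndexReader later in
this patch:

    QueryParser parser = new QueryParser(TextObject.DEFAULT_FIELD, new StandardAnalyzer());
    Query query = parser.parse("hello");  // throws ParseException; equivalent to Content:hello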
diff --git a/pinot-core/pom.xml b/pinot-core/pom.xml
index 8f4efe2..4c8215a 100644
--- a/pinot-core/pom.xml
+++ b/pinot-core/pom.xml
@@ -196,7 +196,24 @@
         </exclusion>
       </exclusions>
     </dependency>
-
+    
+    <!-- Lucene -->
+    <dependency>
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-core</artifactId>
+      <version>7.6.0</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-queryparser</artifactId>
+      <version>7.6.0</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-analyzers-common</artifactId>
+      <version>7.6.0</version>
+    </dependency>
+    
     <!-- test -->
     <dependency>
       <groupId>org.mockito</groupId>
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/immutable/ImmutableSegmentLoader.java b/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/immutable/ImmutableSegmentLoader.java
index 2ddb80c..3d7ac31 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/immutable/ImmutableSegmentLoader.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/immutable/ImmutableSegmentLoader.java
@@ -118,8 +118,8 @@ public class ImmutableSegmentLoader {
     SegmentDirectory.Reader segmentReader = segmentDirectory.createReader();
     Map<String, ColumnIndexContainer> indexContainerMap = new HashMap<>();
     for (Map.Entry<String, ColumnMetadata> entry : segmentMetadata.getColumnMetadataMap().entrySet()) {
-      indexContainerMap
-          .put(entry.getKey(), new PhysicalColumnIndexContainer(segmentReader, entry.getValue(), indexLoadingConfig));
+      indexContainerMap.put(entry.getKey(),
+          new PhysicalColumnIndexContainer(indexDir, segmentReader, entry.getValue(), indexLoadingConfig));
     }
 
     if (schema == null) {
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/io/reader/impl/v1/SortedIndexReaderImpl.java b/pinot-core/src/main/java/org/apache/pinot/core/io/reader/impl/v1/SortedIndexReaderImpl.java
index 0018b8c..a393dd4 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/io/reader/impl/v1/SortedIndexReaderImpl.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/io/reader/impl/v1/SortedIndexReaderImpl.java
@@ -21,6 +21,8 @@ package org.apache.pinot.core.io.reader.impl.v1;
 import com.google.common.base.Preconditions;
 import java.io.IOException;
 import org.apache.pinot.common.utils.Pairs;
+import org.apache.pinot.common.utils.Pairs.IntPair;
+import org.apache.pinot.core.common.Predicate;
 import org.apache.pinot.core.io.reader.BaseSingleColumnSingleValueReader;
 import org.apache.pinot.core.io.reader.ReaderContext;
 import org.apache.pinot.core.io.util.FixedByteValueReaderWriter;
@@ -120,6 +122,10 @@ public class SortedIndexReaderImpl extends BaseSingleColumnSingleValueReader<Sor
   }
 
   @Override
+  public IntPair getDocIds(Predicate predicate) {
+    throw new UnsupportedOperationException("Predicate-based getDocIds is not supported");
+  }
+  @Override
   public Pairs.IntPair getDocIds(int dictId) {
     return new Pairs.IntPair(_reader.getInt(2 * dictId), _reader.getInt(2 * dictId + 1));
   }
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/FilterOperatorUtils.java b/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/FilterOperatorUtils.java
index bc46b2f..f4354ae 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/FilterOperatorUtils.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/FilterOperatorUtils.java
@@ -56,7 +56,13 @@ public class FilterOperatorUtils {
     // Use inverted index if the predicate type is not RANGE or REGEXP_LIKE for efficiency
     DataSourceMetadata dataSourceMetadata = dataSource.getDataSourceMetadata();
     Predicate.Type predicateType = predicateEvaluator.getPredicateType();
-    if (dataSourceMetadata.hasInvertedIndex() && (predicateType != Predicate.Type.RANGE) && (predicateType
+    if (predicateType == Predicate.Type.MATCHES) {
+      if (dataSourceMetadata.hasInvertedIndex()) {
+        return new IndexBasedMatchesFilterOperator(predicateEvaluator, dataSource, startDocId, endDocId);
+      } else {
+        return new ScanBasedMatchesFilterOperator(predicateEvaluator, dataSource, startDocId, endDocId);
+      }
+    } else if (dataSourceMetadata.hasInvertedIndex() && (predicateType != Predicate.Type.RANGE) && (predicateType
         != Predicate.Type.REGEXP_LIKE)) {
       if (dataSourceMetadata.isSorted()) {
         return new SortedInvertedIndexBasedFilterOperator(predicateEvaluator, dataSource, startDocId, endDocId);
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/IndexBasedMatchesFilterOperator.java b/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/IndexBasedMatchesFilterOperator.java
new file mode 100644
index 0000000..50f39b7
--- /dev/null
+++ b/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/IndexBasedMatchesFilterOperator.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.core.operator.filter;
+
+import com.google.common.base.Preconditions;
+import org.apache.lucene.search.TopDocs;
+import org.apache.pinot.core.common.DataSource;
+import org.apache.pinot.core.common.predicate.MatchesPredicate;
+import org.apache.pinot.core.operator.blocks.FilterBlock;
+import org.apache.pinot.core.operator.docidsets.BitmapDocIdSet;
+import org.apache.pinot.core.operator.filter.predicate.MatchesPredicateEvaluatorFactory.DefaultMatchesPredicateEvaluator;
+import org.apache.pinot.core.segment.index.readers.InvertedIndexReader;
+import org.apache.pinot.core.operator.filter.predicate.PredicateEvaluator;
+import org.roaringbitmap.buffer.ImmutableRoaringBitmap;
+import org.roaringbitmap.buffer.MutableRoaringBitmap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class IndexBasedMatchesFilterOperator extends BaseFilterOperator {
+
+  private static final Logger LOGGER =
+      LoggerFactory.getLogger(IndexBasedMatchesFilterOperator.class);
+  private static final String OPERATOR_NAME = "IndexBasedMatchesFilterOperator";
+
+  private final DataSource _dataSource;
+  private final int _startDocId;
+  // TODO: change it to exclusive
+  // Inclusive
+  private final int _endDocId;
+  private MatchesPredicate _matchesPredicate;
+
+  public IndexBasedMatchesFilterOperator(PredicateEvaluator predicateEvaluator,
+      DataSource dataSource, int startDocId, int endDocId) {
+    // NOTE:
+    // A predicate that always evaluates to true or false should not be passed into the
+    // IndexBasedMatchesFilterOperator, for performance reasons.
+    // If the predicate always evaluates to true, use MatchAllFilterOperator; if it always
+    // evaluates to false, use EmptyFilterOperator.
+    Preconditions
+        .checkArgument(!predicateEvaluator.isAlwaysTrue() && !predicateEvaluator.isAlwaysFalse());
+    Preconditions.checkArgument(predicateEvaluator instanceof DefaultMatchesPredicateEvaluator);
+
+    DefaultMatchesPredicateEvaluator evaluator =
+        (DefaultMatchesPredicateEvaluator) predicateEvaluator;
+    _matchesPredicate = evaluator.getMatchesPredicate();
+    _dataSource = dataSource;
+    _startDocId = startDocId;
+    _endDocId = endDocId;
+  }
+
+  @Override
+  protected FilterBlock getNextBlock() {
+
+    InvertedIndexReader invertedIndex = _dataSource.getInvertedIndex();
+    MutableRoaringBitmap bitmap = (MutableRoaringBitmap) invertedIndex.getDocIds(_matchesPredicate);
+
+    boolean exclusive = false;
+    ImmutableRoaringBitmap[] bitmapArray = new ImmutableRoaringBitmap[] {
+        bitmap
+    };
+    return new FilterBlock(new BitmapDocIdSet(bitmapArray, _startDocId, _endDocId, exclusive));
+  }
+
+  @Override
+  public String getOperatorName() {
+    return OPERATOR_NAME;
+  }
+}
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/data/objects/TextObject.java b/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/ScanBasedMatchesFilterOperator.java
similarity index 59%
copy from pinot-common/src/main/java/org/apache/pinot/common/data/objects/TextObject.java
copy to pinot-core/src/main/java/org/apache/pinot/core/operator/filter/ScanBasedMatchesFilterOperator.java
index cf5f27e..1f730e1 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/data/objects/TextObject.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/ScanBasedMatchesFilterOperator.java
@@ -16,36 +16,27 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.apache.pinot.common.data.objects;
+package org.apache.pinot.core.operator.filter;
 
-import java.util.List;
+import org.apache.pinot.core.common.DataSource;
+import org.apache.pinot.core.operator.blocks.FilterBlock;
+import org.apache.pinot.core.operator.filter.predicate.PredicateEvaluator;
 
-import org.apache.pinot.common.data.PinotObject;
+public class ScanBasedMatchesFilterOperator extends BaseFilterOperator {
 
-import com.google.common.collect.Lists;
-
-public class TextObject implements PinotObject {
-
-  byte[] _bytes;
-  private static List<String> _FIELDS = Lists.newArrayList("Content");
-
-  @Override
-  public void init(byte[] bytes) {
-    _bytes = bytes;
+  public ScanBasedMatchesFilterOperator(PredicateEvaluator predicateEvaluator,
+      DataSource dataSource, int startDocId, int endDocId) {
+    // TODO Auto-generated constructor stub
   }
 
   @Override
-  public byte[] toBytes() {
-    return _bytes;
-  }
-
-  @Override
-  public List<String> getPropertyNames() {
-    return _FIELDS;
+  protected FilterBlock getNextBlock() {
+    // TODO Auto-generated method stub
+    return null;
   }
 
   @Override
-  public Object getProperty(String field) {
+  public String getOperatorName() {
     // TODO Auto-generated method stub
     return null;
   }
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/predicate/MatchesPredicateEvaluatorFactory.java b/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/predicate/MatchesPredicateEvaluatorFactory.java
index 26b7e16..684573f 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/predicate/MatchesPredicateEvaluatorFactory.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/predicate/MatchesPredicateEvaluatorFactory.java
@@ -34,17 +34,15 @@ public class MatchesPredicateEvaluatorFactory {
    */
   public static BaseRawValueBasedPredicateEvaluator newRawValueBasedEvaluator(
       MatchesPredicate textMatchPredicate, FieldSpec.DataType dataType) {
-    return new RawValueBasedTextMatchPredicateEvaluator(textMatchPredicate);
+    return new DefaultMatchesPredicateEvaluator(textMatchPredicate);
   }
 
-  public static final class RawValueBasedTextMatchPredicateEvaluator
+  public static final class DefaultMatchesPredicateEvaluator
       extends BaseRawValueBasedPredicateEvaluator {
-    String _query;
-    String _options;
+    private MatchesPredicate _matchesPredicate;
 
-    public RawValueBasedTextMatchPredicateEvaluator(MatchesPredicate textMatchPredicate) {
-      _query = textMatchPredicate.getQuery();
-      _options = textMatchPredicate.getQueryOptions();
+    public DefaultMatchesPredicateEvaluator(MatchesPredicate matchesPredicate) {
+      this._matchesPredicate = matchesPredicate;
     }
 
     @Override
@@ -58,12 +56,9 @@ public class MatchesPredicateEvaluatorFactory {
           "Text Match is not supported via scanning, its supported only via inverted index");
     }
 
-    public String getQueryString() {
-      return _query;
-    }
-
-    public String getQueryOptions() {
-      return _options;
+    public MatchesPredicate getMatchesPredicate() {
+      return _matchesPredicate;
     }
+   
   }
 }
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/realtime/impl/invertedindex/RealtimeInvertedIndexReader.java b/pinot-core/src/main/java/org/apache/pinot/core/realtime/impl/invertedindex/RealtimeInvertedIndexReader.java
index 363f688..4c45850 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/realtime/impl/invertedindex/RealtimeInvertedIndexReader.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/realtime/impl/invertedindex/RealtimeInvertedIndexReader.java
@@ -21,6 +21,8 @@ package org.apache.pinot.core.realtime.impl.invertedindex;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.pinot.core.common.Predicate;
 import org.apache.pinot.core.segment.index.readers.InvertedIndexReader;
 import org.roaringbitmap.buffer.MutableRoaringBitmap;
 
@@ -54,7 +56,11 @@ public class RealtimeInvertedIndexReader implements InvertedIndexReader<MutableR
       _bitmaps.get(dictId).checkAndAdd(docId);
     }
   }
-
+  
+  @Override
+  public MutableRoaringBitmap getDocIds(Predicate predicate) {
+    throw new UnsupportedOperationException("Predicate:"+ predicate + " is not supported");
+  }
   @Override
   public MutableRoaringBitmap getDocIds(int dictId) {
     ThreadSafeMutableRoaringBitmap bitmap;
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/InvertedIndexCreator.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/InvertedIndexCreator.java
index 3eafa9d..d5dadcf 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/InvertedIndexCreator.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/InvertedIndexCreator.java
@@ -21,6 +21,8 @@ package org.apache.pinot.core.segment.creator;
 import java.io.Closeable;
 import java.io.IOException;
 
+import org.apache.pinot.common.data.PinotObject;
+
 
 /**
  * Currently only support RoaringBitmap inverted index.
@@ -65,6 +67,12 @@ public interface InvertedIndexCreator extends Closeable {
    * For multi-valued column, adds the dictionary Ids for the next document.
    */
   void add(int[] dictIds, int length);
+  
+  /**
+   * For complex object types such as MAP, JSON and TEXT, adds the next document.
+   * @param object the PinotObject to index
+   */
+  void add(PinotObject object);
 
   /**
    * Seals the index and flushes it to disk.
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/V1Constants.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/V1Constants.java
index 3536018..7153f3a 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/V1Constants.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/V1Constants.java
@@ -60,6 +60,7 @@ public class V1Constants {
     public static final String RAW_SV_FORWARD_INDEX_FILE_EXTENSION = ".sv.raw.fwd";
     public static final String UNSORTED_MV_FORWARD_INDEX_FILE_EXTENSION = ".mv.fwd";
     public static final String BITMAP_INVERTED_INDEX_FILE_EXTENSION = ".bitmap.inv";
+    public static final String LUCENE_INVERTED_INDEX_DIR = ".lucene.inv";
     public static final String BLOOM_FILTER_FILE_EXTENSION = ".bloom";
   }
 
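Note: a Lucene-indexed column gets its own directory under the segment
directory, named by appending this new extension to the column name (as
InvertedIndexHandler and LuceneInvertedIndexReader do below). A sketch:

    // For a column named "headers":
    File luceneDir = new File(indexDir, "headers" + V1Constants.Indexes.LUCENE_INVERTED_INDEX_DIR);
    // -> <indexDir>/headers.lucene.inv
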
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/LuceneIndexCreator.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/LuceneIndexCreator.java
new file mode 100644
index 0000000..6622b57
--- /dev/null
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/LuceneIndexCreator.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.core.segment.creator.impl.inv;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.pinot.common.data.FieldSpec;
+import org.apache.pinot.common.data.PinotObject;
+import org.apache.pinot.core.segment.creator.InvertedIndexCreator;
+import org.apache.pinot.core.segment.index.ColumnMetadata;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LuceneIndexCreator implements InvertedIndexCreator {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(LuceneIndexCreator.class);
+
+  public static final String VERSION = "7.6.0";
+  // Index file will be flushed after reaching this threshold
+  private static final int MAX_BUFFER_SIZE_MB = 500;
+  private static final Field.Store DEFAULT_STORE = Field.Store.NO;
+  private final Analyzer _analyzer;
+  private final IndexWriter _writer;
+  private final IndexWriterConfig _indexWriterConfig;
+  private final Directory _indexDirectory;
+  // TODO: Figure out a way to avoid this
+  boolean _isText = false;
+
+  public LuceneIndexCreator(ColumnMetadata columnMetadata, File outputDirectory) {
+    // TODO: Get IndexConfig and set a different analyzer for each field. By default we use
+    // StandardAnalyzer and TextField, which can be expensive and inefficient if all we
+    // need is an exact match. See KeywordAnalyzer.
+    _analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer());
+    _indexWriterConfig = new IndexWriterConfig(_analyzer);
+    _indexWriterConfig.setRAMBufferSizeMB(MAX_BUFFER_SIZE_MB);
+    _isText = "TEXT".equalsIgnoreCase(columnMetadata.getObjectType());
+    try {
+      _indexDirectory = FSDirectory.open(outputDirectory.toPath());
+      _writer = new IndexWriter(_indexDirectory, _indexWriterConfig);
+    } catch (IOException e) {
+      LOGGER.error("Encountered error creating LuceneIndexCreator ", e);
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public void add(int dictId) {
+    throw new UnsupportedOperationException(
+        "Lucene indexing not supported for dictionary encoded columns");
+  }
+
+  @Override
+  public void add(int[] dictIds, int length) {
+    throw new UnsupportedOperationException(
+        "Lucene indexing not supported for dictionary encoded columns");
+
+  }
+
+  @Override
+  public void add(PinotObject object) {
+    Document document = new Document();
+    List<String> propertyNames = object.getPropertyNames();
+    for (String propertyName : propertyNames) {
+      Field field;
+      // TODO: Figure out a way to avoid special casing Text, have a way to get propertyType from
+      // pinotObject?
+      // TODO: Handle list field
+      Object value = object.getProperty(propertyName);
+      if (value instanceof List) {
+        List<?> list = (List<?>) value;
+        for (Object item : list) {
+          field = new TextField(propertyName, item.toString(), DEFAULT_STORE);
+          document.add(field);
+        }
+      } else {
+        field = new TextField(propertyName, value.toString(), DEFAULT_STORE);
+        document.add(field);
+      }
+    }
+    try {
+      _writer.addDocument(document);
+    } catch (IOException e) {
+      LOGGER.error("Encountered exception while adding doc:{}", document.toString(), e);
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public void seal() throws IOException {
+    // No-op: the IndexWriter buffers in memory and commits on close().
+  }
+
+  @Override
+  public void close() throws IOException {
+    _writer.close();
+  }
+
+}
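
Note: a hypothetical end-to-end use of the creator above, assuming a
ColumnMetadata whose object type is TEXT; columnMetadata and luceneDir are
stand-ins, not real fixtures:

    try (LuceneIndexCreator creator = new LuceneIndexCreator(columnMetadata, luceneDir)) {
      TextObject text = new TextObject();
      text.init("hello world".getBytes());  // TextObject exposes a single "Content" property
      creator.add(text);                    // each property is indexed as a Lucene TextField
      creator.seal();                       // currently a no-op; close() commits the writer
    }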
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/OffHeapBitmapInvertedIndexCreator.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/OffHeapBitmapInvertedIndexCreator.java
index c4e30b7..b525427 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/OffHeapBitmapInvertedIndexCreator.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/OffHeapBitmapInvertedIndexCreator.java
@@ -26,6 +26,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import org.apache.commons.io.FileUtils;
 import org.apache.pinot.common.data.FieldSpec;
+import org.apache.pinot.common.data.PinotObject;
 import org.apache.pinot.core.segment.creator.InvertedIndexCreator;
 import org.apache.pinot.core.segment.creator.impl.V1Constants;
 import org.apache.pinot.core.segment.memory.PinotDataBuffer;
@@ -119,6 +120,11 @@ public final class OffHeapBitmapInvertedIndexCreator implements InvertedIndexCre
   }
 
   @Override
+  public void add(PinotObject object) {
+   throw new UnsupportedOperationException("Bitmap Indexing not supported for Pinot Objects");
+  }
+  
+  @Override
   public void add(int dictId) {
     putInt(_forwardIndexValueBuffer, _nextDocId++, dictId);
     putInt(_invertedIndexLengthBuffer, dictId, getInt(_invertedIndexLengthBuffer, dictId) + 1);
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/OnHeapBitmapInvertedIndexCreator.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/OnHeapBitmapInvertedIndexCreator.java
index c358361..83bc69c 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/OnHeapBitmapInvertedIndexCreator.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/OnHeapBitmapInvertedIndexCreator.java
@@ -25,6 +25,7 @@ import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import org.apache.commons.io.FileUtils;
+import org.apache.pinot.common.data.PinotObject;
 import org.apache.pinot.core.segment.creator.InvertedIndexCreator;
 import org.apache.pinot.core.segment.creator.impl.V1Constants;
 import org.roaringbitmap.buffer.MutableRoaringBitmap;
@@ -47,6 +48,11 @@ public final class OnHeapBitmapInvertedIndexCreator implements InvertedIndexCrea
   }
 
   @Override
+  public void add(PinotObject object) {
+   throw new UnsupportedOperationException("Bitmap Indexing not supported for Pinot Objects");
+  }
+  
+  @Override
   public void add(int dictId) {
     _bitmaps[dictId].add(_nextDocId++);
   }
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/column/PhysicalColumnIndexContainer.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/column/PhysicalColumnIndexContainer.java
index df83533..29b6b46 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/column/PhysicalColumnIndexContainer.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/column/PhysicalColumnIndexContainer.java
@@ -18,8 +18,10 @@
  */
 package org.apache.pinot.core.segment.index.column;
 
+import java.io.File;
 import java.io.IOException;
 import org.apache.pinot.common.data.FieldSpec;
+import org.apache.pinot.common.data.FieldSpec.DataType;
 import org.apache.pinot.core.io.reader.DataFileReader;
 import org.apache.pinot.core.io.reader.SingleColumnSingleValueReader;
 import org.apache.pinot.core.io.reader.impl.v1.FixedBitMultiValueReader;
@@ -39,6 +41,7 @@ import org.apache.pinot.core.segment.index.readers.ImmutableDictionaryReader;
 import org.apache.pinot.core.segment.index.readers.IntDictionary;
 import org.apache.pinot.core.segment.index.readers.InvertedIndexReader;
 import org.apache.pinot.core.segment.index.readers.LongDictionary;
+import org.apache.pinot.core.segment.index.readers.LuceneInvertedIndexReader;
 import org.apache.pinot.core.segment.index.readers.OnHeapDoubleDictionary;
 import org.apache.pinot.core.segment.index.readers.OnHeapFloatDictionary;
 import org.apache.pinot.core.segment.index.readers.OnHeapIntDictionary;
@@ -60,7 +63,7 @@ public final class PhysicalColumnIndexContainer implements ColumnIndexContainer
   private final ImmutableDictionaryReader _dictionary;
   private final BloomFilterReader _bloomFilterReader;
 
-  public PhysicalColumnIndexContainer(SegmentDirectory.Reader segmentReader, ColumnMetadata metadata,
+  public PhysicalColumnIndexContainer(File indexDir, SegmentDirectory.Reader segmentReader, ColumnMetadata metadata,
       IndexLoadingConfig indexLoadingConfig)
       throws IOException {
     String columnName = metadata.getColumnName();
@@ -105,16 +108,25 @@ public final class PhysicalColumnIndexContainer implements ColumnIndexContainer
                 metadata.getBitsPerElement());
       }
       if (loadInvertedIndex) {
-        _invertedIndex =
-            new BitmapInvertedIndexReader(segmentReader.getIndexFor(columnName, ColumnIndexType.INVERTED_INDEX),
-                metadata.getCardinality());
+        if (metadata.getObjectType() == null && metadata.getFieldSpec().getDataType() != DataType.BYTES) {
+          _invertedIndex =
+              new BitmapInvertedIndexReader(segmentReader.getIndexFor(columnName, ColumnIndexType.INVERTED_INDEX),
+                  metadata.getCardinality());
+        } else {
+          _invertedIndex = new LuceneInvertedIndexReader(indexDir, metadata);
+        }
       } else {
         _invertedIndex = null;
       }
     } else {
       // Raw index
       _forwardIndex = loadRawForwardIndex(fwdIndexBuffer, metadata.getDataType());
-      _invertedIndex = null;
+      if (loadInvertedIndex && (metadata.getObjectType() != null
+          || metadata.getFieldSpec().getDataType() == DataType.BYTES)) {
+        _invertedIndex = new LuceneInvertedIndexReader(indexDir, metadata);
+      } else {
+        _invertedIndex = null;
+      }
       _dictionary = null;
       _bloomFilterReader = null;
     }
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/data/source/ColumnDataSource.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/data/source/ColumnDataSource.java
index 3044457..2734f75 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/data/source/ColumnDataSource.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/data/source/ColumnDataSource.java
@@ -87,7 +87,7 @@ public final class ColumnDataSource extends DataSource {
       }
     } else {
       // Raw index
-      Preconditions.checkState(invertedIndex == null);
+      //Preconditions.checkState(invertedIndex == null);
     }
 
     _operatorName = "ColumnDataSource [" + columnName + "]";
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/loader/invertedindex/InvertedIndexHandler.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/loader/invertedindex/InvertedIndexHandler.java
index 15fa01d..b9a2bec 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/loader/invertedindex/InvertedIndexHandler.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/loader/invertedindex/InvertedIndexHandler.java
@@ -20,16 +20,24 @@ package org.apache.pinot.core.segment.index.loader.invertedindex;
 
 import java.io.File;
 import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
 import java.util.HashSet;
 import java.util.Set;
 import javax.annotation.Nonnull;
 import org.apache.commons.io.FileUtils;
+import org.apache.pinot.common.data.PinotObject;
+import org.apache.pinot.common.data.objects.JSONObject;
+import org.apache.pinot.common.data.objects.MapObject;
+import org.apache.pinot.common.data.objects.TextObject;
+import org.apache.pinot.common.utils.TarGzCompressionUtils;
 import org.apache.pinot.core.indexsegment.generator.SegmentVersion;
 import org.apache.pinot.core.io.reader.DataFileReader;
 import org.apache.pinot.core.io.reader.SingleColumnMultiValueReader;
 import org.apache.pinot.core.io.reader.impl.v1.FixedBitMultiValueReader;
 import org.apache.pinot.core.io.reader.impl.v1.FixedBitSingleValueReader;
+import org.apache.pinot.core.io.reader.impl.v1.VarByteChunkSingleValueReader;
 import org.apache.pinot.core.segment.creator.impl.V1Constants;
+import org.apache.pinot.core.segment.creator.impl.inv.LuceneIndexCreator;
 import org.apache.pinot.core.segment.creator.impl.inv.OffHeapBitmapInvertedIndexCreator;
 import org.apache.pinot.core.segment.index.ColumnMetadata;
 import org.apache.pinot.core.segment.index.SegmentMetadataImpl;
@@ -70,11 +78,99 @@ public class InvertedIndexHandler {
   public void createInvertedIndices()
       throws IOException {
     for (ColumnMetadata columnMetadata : _invertedIndexColumns) {
-      createInvertedIndexForColumn(columnMetadata);
+      String objectType = columnMetadata.getObjectType();
+      if (objectType == null) {
+        createInvertedIndexForSimpleField(columnMetadata);
+      } else {
+        createInvertedIndexForComplexObject(columnMetadata);
+      }
     }
   }
 
-  private void createInvertedIndexForColumn(ColumnMetadata columnMetadata)
+  @SuppressWarnings("unchecked")
+  private void createInvertedIndexForComplexObject(ColumnMetadata columnMetadata)
+      throws IOException {
+    String column = columnMetadata.getColumnName();
+
+    File inProgress = new File(_indexDir, column + ".inv.inprogress");
+    File invertedIndexDir = new File(_indexDir, column + V1Constants.Indexes.LUCENE_INVERTED_INDEX_DIR);
+
+    if (!inProgress.exists()) {
+      // Marker file does not exist, which means last run ended normally.
+
+      if (invertedIndexDir.exists()) {
+        // Skip creating inverted index if already exists.
+        LOGGER.info("Found inverted index for segment: {}, column: {}", _segmentName, column);
+        return;
+      }
+
+      // Create a marker file.
+      FileUtils.touch(inProgress);
+    } else {
+      // Marker file exists, which means last run gets interrupted.
+      // Remove inverted index if exists.
+      // For v1 and v2, it's the actual inverted index. For v3, it's the temporary inverted index.
+      FileUtils.deleteQuietly(invertedIndexDir);
+    }
+
+    // Create new inverted index for the column.
+    LOGGER.info("Creating new lucene based inverted index for segment: {}, column: {}", _segmentName, column);
+    int numDocs = columnMetadata.getTotalDocs();
+    String objectType = columnMetadata.getObjectType();
+    Class<? extends PinotObject> pinotObjectClazz;
+    PinotObject pinotObject = null;
+    try {
+      switch (objectType.toUpperCase()) {
+        case "MAP":
+          pinotObjectClazz = MapObject.class;
+          break;
+        case "JSON":
+          pinotObjectClazz = JSONObject.class;
+          break;
+        case "TEXT":
+          pinotObjectClazz = TextObject.class;
+          break;
+        default:
+          // custom object type.
+          pinotObjectClazz = (Class<? extends PinotObject>) Class.forName(objectType);
+      }
+      pinotObject = pinotObjectClazz.getConstructor(new Class[]{}).newInstance(new Object[]{});
+    } catch (Exception e) {
+      LOGGER.error("Error pinot object  for type:{}. Skipping inverted index creation", objectType);
+      return;
+    }
+
+    try (LuceneIndexCreator luceneIndexCreator = new LuceneIndexCreator(columnMetadata, invertedIndexDir)) {
+      try (DataFileReader fwdIndex = getForwardIndexReader(columnMetadata, _segmentWriter)) {
+        if (columnMetadata.isSingleValue()) {
+          // Single-value column.
+          VarByteChunkSingleValueReader svFwdIndex = (VarByteChunkSingleValueReader) fwdIndex;
+          for (int i = 0; i < numDocs; i++) {
+            byte[] bytes = svFwdIndex.getBytes(i);
+
+            pinotObject.init(bytes);
+            luceneIndexCreator.add(pinotObject);
+          }
+        } else {
+          throw new UnsupportedOperationException("Multi Value not supported for complex object types");
+        }
+        luceneIndexCreator.seal();
+      }
+    }
+    String tarGzPath = TarGzCompressionUtils.createTarGzOfDirectory(invertedIndexDir.getAbsolutePath());
+
+    // For v3, write the generated inverted index file into the single file and remove it.
+    if (_segmentVersion == SegmentVersion.v3) {
+      LoaderUtils.writeIndexToV3Format(_segmentWriter, column, new File(tarGzPath), ColumnIndexType.INVERTED_INDEX);
+    }
+
+    // Delete the marker file.
+    FileUtils.deleteQuietly(inProgress);
+
+    LOGGER.info("Created inverted index for segment: {}, column: {}", _segmentName, column);
+  }
+
+  private void createInvertedIndexForSimpleField(ColumnMetadata columnMetadata)
       throws IOException {
     String column = columnMetadata.getColumnName();
 
@@ -146,7 +242,11 @@ public class InvertedIndexHandler {
     int numRows = columnMetadata.getTotalDocs();
     int numBitsPerValue = columnMetadata.getBitsPerElement();
     if (columnMetadata.isSingleValue()) {
-      return new FixedBitSingleValueReader(buffer, numRows, numBitsPerValue);
+      if (columnMetadata.hasDictionary()) {
+        return new FixedBitSingleValueReader(buffer, numRows, numBitsPerValue);
+      } else {
+        return new VarByteChunkSingleValueReader(buffer);
+      }
     } else {
       return new FixedBitMultiValueReader(buffer, numRows, columnMetadata.getTotalNumberOfEntries(), numBitsPerValue);
     }
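
Note: besides the built-in MAP/JSON/TEXT types, the default branch of the
switch above resolves a custom object type via Class.forName, so a
user-supplied class only needs a public no-arg constructor plus the
PinotObject contract. A minimal hypothetical example:

    public class UrlObject implements PinotObject {
      private byte[] _bytes;
      @Override public void init(byte[] bytes) { _bytes = bytes; }
      @Override public byte[] toBytes() { return _bytes; }
      @Override public List<String> getPropertyNames() { return Lists.newArrayList("Url"); }
      @Override public Object getProperty(String field) { return new String(_bytes); }
    }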
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/BitmapInvertedIndexReader.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/BitmapInvertedIndexReader.java
index d68af16..d9d830a 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/BitmapInvertedIndexReader.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/BitmapInvertedIndexReader.java
@@ -22,6 +22,8 @@ import java.io.File;
 import java.io.IOException;
 import java.lang.ref.SoftReference;
 import java.nio.ByteBuffer;
+
+import org.apache.pinot.core.common.Predicate;
 import org.apache.pinot.core.segment.memory.PinotDataBuffer;
 import org.roaringbitmap.buffer.ImmutableRoaringBitmap;
 import org.slf4j.Logger;
@@ -52,6 +54,11 @@ public class BitmapInvertedIndexReader implements InvertedIndexReader<ImmutableR
     load(indexDataBuffer);
   }
 
+  @Override
+  public ImmutableRoaringBitmap getDocIds(Predicate predicate) {
+    throw new UnsupportedOperationException("Predicate based evaluation not supported for Bitmap based Indexing scheme");
+  }
+  
   /**
    * {@inheritDoc}
    */
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/InvertedIndexReader.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/InvertedIndexReader.java
index 6a949c0..6b42f4c 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/InvertedIndexReader.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/InvertedIndexReader.java
@@ -20,6 +20,8 @@ package org.apache.pinot.core.segment.index.readers;
 
 import java.io.Closeable;
 
+import org.apache.pinot.core.common.Predicate;
+
 
 public interface InvertedIndexReader<T> extends Closeable {
 
@@ -27,4 +29,10 @@ public interface InvertedIndexReader<T> extends Closeable {
    * Get the document ids for the given dictionary id.
    */
   T getDocIds(int dictId);
+  
+  /**
+   * Get the document ids for the given predicate.
+   */
+  T getDocIds(Predicate predicate);
+  
 }
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/LuceneInvertedIndexReader.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/LuceneInvertedIndexReader.java
new file mode 100644
index 0000000..576bcc4
--- /dev/null
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/LuceneInvertedIndexReader.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.core.segment.index.readers;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queryparser.classic.ParseException;
+import org.apache.lucene.queryparser.classic.QueryParser;
+import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
+import org.apache.lucene.queryparser.surround.query.FieldsQuery;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.pinot.common.data.FieldSpec.DataType;
+import org.apache.pinot.common.data.FieldSpec.FieldType;
+import org.apache.pinot.common.data.objects.TextObject;
+import org.apache.pinot.core.common.Predicate;
+import org.apache.pinot.core.common.predicate.MatchesPredicate;
+import org.apache.pinot.core.segment.creator.impl.V1Constants;
+import org.apache.pinot.core.segment.index.ColumnMetadata;
+import org.roaringbitmap.buffer.MutableRoaringBitmap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+
+public class LuceneInvertedIndexReader implements InvertedIndexReader<MutableRoaringBitmap> {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(LuceneInvertedIndexReader.class);
+  private final IndexSearcher _searcher;
+  private final Analyzer _analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer());
+  private Directory _index;
+
+  /**
+   * TODO: Change this to take PinotDataBuffer; work-around for now since Lucene needs an
+   * actual directory.
+   * @param segmentIndexDir the segment index directory
+   * @param metadata the column metadata
+   */
+  public LuceneInvertedIndexReader(File segmentIndexDir, ColumnMetadata metadata) {
+
+    try {
+      File searchIndexDir = new File(segmentIndexDir.getPath(),
+          metadata.getColumnName() + V1Constants.Indexes.LUCENE_INVERTED_INDEX_DIR);
+      _index = FSDirectory.open(searchIndexDir.toPath());
+      IndexReader reader = DirectoryReader.open(_index);
+      _searcher = new IndexSearcher(reader);
+    } catch (IOException e) {
+      LOGGER.error("Encountered error creating LuceneSearchIndexReader ", e);
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    _index.close();
+  }
+
+  @Override
+  public MutableRoaringBitmap getDocIds(int dictId) {
+    throw new UnsupportedOperationException(
+        "DictId based evaluation not supported for Lucene based Indexing scheme");
+  }
+
+  @Override
+  public MutableRoaringBitmap getDocIds(Predicate predicate) {
+    MatchesPredicate matchesPredicate = (MatchesPredicate) predicate;
+    MutableRoaringBitmap bitmap =
+        getDocIds(matchesPredicate.getQuery(), matchesPredicate.getQueryOptions());
+    return bitmap;
+  }
+
+  public MutableRoaringBitmap getDocIds(String queryStr, String options) {
+    QueryParser queryParser = new QueryParser(TextObject.DEFAULT_FIELD, _analyzer);
+    Query query = null;
+    try {
+      query = queryParser.parse(queryStr);
+    } catch (ParseException e) {
+      LOGGER.error("Encountered exception while parsing query {}", queryStr, e);
+      throw new RuntimeException(e);
+    }
+    MutableRoaringBitmap bitmap = new MutableRoaringBitmap();
+    try {
+      Collector collector = createCollector(bitmap);
+      _searcher.search(query, collector);
+    } catch (IOException e) {
+      LOGGER.error("Encountered exception while executing search query {}", queryStr, e);
+      throw new RuntimeException(e);
+    }
+    return bitmap;
+  }
+
+  private Collector createCollector(final MutableRoaringBitmap bitmap) {
+    return new LuceneResultCollector(bitmap);
+  }
+
+  public static final class LuceneResultCollector implements Collector {
+    private final MutableRoaringBitmap bitmap;
+
+    public LuceneResultCollector(MutableRoaringBitmap bitmap) {
+      this.bitmap = bitmap;
+    }
+
+    @Override
+    public boolean needsScores() {
+      return false;
+    }
+
+    @Override
+    public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
+      return new LeafCollector() {
+
+        @Override
+        public void setScorer(Scorer scorer) throws IOException {
+          // ignore
+        }
+
+        @Override
+        public void collect(int doc) throws IOException {
+          bitmap.add(doc);
+        }
+      };
+    }
+  }
+}
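
Note: a sketch of querying this reader once a segment is loaded; the query
string uses standard Lucene syntax, and the options argument is currently
unused by this implementation:

    LuceneInvertedIndexReader reader = new LuceneInvertedIndexReader(segmentIndexDir, columnMetadata);
    MutableRoaringBitmap docIds = reader.getDocIds("k1:value1", null);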
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/virtualcolumn/DocIdVirtualColumnProvider.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/virtualcolumn/DocIdVirtualColumnProvider.java
index 22ce9ad..9662d0e 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/virtualcolumn/DocIdVirtualColumnProvider.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/virtualcolumn/DocIdVirtualColumnProvider.java
@@ -21,6 +21,8 @@ package org.apache.pinot.core.segment.virtualcolumn;
 import java.io.IOException;
 import org.apache.pinot.common.data.FieldSpec;
 import org.apache.pinot.common.utils.Pairs;
+import org.apache.pinot.common.utils.Pairs.IntPair;
+import org.apache.pinot.core.common.Predicate;
 import org.apache.pinot.core.io.reader.BaseSingleColumnSingleValueReader;
 import org.apache.pinot.core.io.reader.DataFileReader;
 import org.apache.pinot.core.io.reader.impl.ChunkReaderContext;
@@ -108,7 +110,10 @@ public class DocIdVirtualColumnProvider extends BaseVirtualColumnProvider {
     public Pairs.IntPair getDocIds(int dictId) {
       return new Pairs.IntPair(dictId, dictId);
     }
-
+    @Override
+    public IntPair getDocIds(Predicate predicate) {
+      throw new UnsupportedOperationException("");
+    }
     @Override
     public void close()
         throws IOException {
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/virtualcolumn/SingleStringVirtualColumnProvider.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/virtualcolumn/SingleStringVirtualColumnProvider.java
index 6082306..8672fed 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/virtualcolumn/SingleStringVirtualColumnProvider.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/virtualcolumn/SingleStringVirtualColumnProvider.java
@@ -21,6 +21,8 @@ package org.apache.pinot.core.segment.virtualcolumn;
 import java.io.IOException;
 import org.apache.pinot.common.data.FieldSpec;
 import org.apache.pinot.common.utils.Pairs;
+import org.apache.pinot.common.utils.Pairs.IntPair;
+import org.apache.pinot.core.common.Predicate;
 import org.apache.pinot.core.io.reader.BaseSingleColumnSingleValueReader;
 import org.apache.pinot.core.io.reader.DataFileReader;
 import org.apache.pinot.core.io.reader.impl.v1.SortedIndexReader;
@@ -75,7 +77,10 @@ public abstract class SingleStringVirtualColumnProvider extends BaseVirtualColum
     public SingleStringInvertedIndex(int length) {
       _length = length;
     }
-
+    @Override
+    public IntPair getDocIds(Predicate predicate) {
+      throw new UnsupportedOperationException("");
+    }
     @Override
     public Pairs.IntPair getDocIds(int dictId) {
       if (dictId == 0) {
diff --git a/pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/LuceneIndexClusterIntegrationTest.java b/pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/LuceneIndexClusterIntegrationTest.java
new file mode 100644
index 0000000..ab4d561
--- /dev/null
+++ b/pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/LuceneIndexClusterIntegrationTest.java
@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.integration.tests;
+
+import java.io.File;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import javax.annotation.Nonnull;
+
+import org.apache.avro.file.DataFileWriter;
+import org.apache.avro.generic.GenericData;
+import org.apache.avro.generic.GenericDatumWriter;
+import org.apache.commons.io.FileUtils;
+import org.apache.pinot.common.data.DimensionFieldSpec;
+import org.apache.pinot.common.data.FieldSpec;
+import org.apache.pinot.common.data.FieldSpec.DataType;
+import org.apache.pinot.common.data.Schema;
+import org.apache.pinot.core.indexsegment.generator.SegmentVersion;
+import org.apache.pinot.tools.data.generator.AvroWriter;
+import org.apache.pinot.tools.query.comparison.StarTreeQueryGenerator;
+import org.apache.pinot.util.TestUtils;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.JsonNodeFactory;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.google.common.collect.Lists;
+
+public class LuceneIndexClusterIntegrationTest extends BaseClusterIntegrationTest {
+  protected static final String DEFAULT_TABLE_NAME = "myTable";
+  static final long TOTAL_DOCS = 1_000L;
+
+  protected Schema _schema;
+  private StarTreeQueryGenerator _queryGenerator;
+  private String _currentTable;
+
+  @Nonnull
+  @Override
+  protected String getTableName() {
+    return _currentTable;
+  }
+
+  @Nonnull
+  @Override
+  protected String getSchemaFileName() {
+    return null;
+  }
+
+  @BeforeClass
+  public void setUp() throws Exception {
+    TestUtils.ensureDirectoriesExistAndEmpty(_tempDir);
+
+    // Start the Pinot cluster
+    startZk();
+    startController();
+    startBroker();
+    startServers(1);
+
+    _schema = new Schema();
+    FieldSpec jsonFieldSpec = new DimensionFieldSpec();
+    jsonFieldSpec.setDataType(DataType.BYTES);
+    jsonFieldSpec.setDefaultNullValue("{}".getBytes());
+    jsonFieldSpec.setObjectType("JSON");
+    jsonFieldSpec.setName("headers");
+    jsonFieldSpec.setSingleValueField(true);
+    _schema.addField(jsonFieldSpec);
+
+    // Create the tables
+    ArrayList<String> invertedIndexColumns = Lists.newArrayList("headers");
+    addOfflineTable(DEFAULT_TABLE_NAME, null, null, null, null, null, SegmentVersion.v1,
+        invertedIndexColumns, null, null);
+
+    setUpSegmentsAndQueryGenerator();
+
+    // Wait for all documents loaded
+    _currentTable = DEFAULT_TABLE_NAME;
+    waitForAllDocsLoaded(10_000);
+  }
+
+  @Override
+  protected long getCountStarResult() {
+    return TOTAL_DOCS;
+  }
+  
+  protected void setUpSegmentsAndQueryGenerator() throws Exception {
+    org.apache.avro.Schema avroSchema = AvroWriter.getAvroSchema(_schema);
+    DataFileWriter<GenericData.Record> recordWriter =
+        new DataFileWriter<>(new GenericDatumWriter<GenericData.Record>(avroSchema));
+    String parent = "/tmp/luceneTest";
+    File avroFile = new File(parent, "part-" + 0 + ".avro");
+    avroFile.getParentFile().mkdirs();
+    recordWriter.create(avroSchema, avroFile);
+    ObjectMapper mapper = new ObjectMapper();
+    for (int i = 0; i < TOTAL_DOCS; i++) {
+      ObjectNode objectNode = JsonNodeFactory.instance.objectNode();
+      objectNode.put("k1", "value" + i);
+      objectNode.put("k2", "value" + i);
+      String json = mapper.writeValueAsString(objectNode);
+      GenericData.Record record = new GenericData.Record(avroSchema);
+      ByteBuffer byteBuffer = ByteBuffer.wrap(json.getBytes());
+      record.put("headers", byteBuffer);
+      recordWriter.append(record);
+    }
+    recordWriter.close();
+
+    // Unpack the Avro files
+    List<File> avroFiles = Lists.newArrayList(avroFile);
+
+    // Create and upload segments without star tree indexes from Avro data
+    createAndUploadSegments(avroFiles, DEFAULT_TABLE_NAME, false);
+
+  }
+
+  private void createAndUploadSegments(List<File> avroFiles, String tableName,
+      boolean createStarTreeIndex) throws Exception {
+    System.out.println("SEGMENT_DIR" + _segmentDir);
+    TestUtils.ensureDirectoriesExistAndEmpty(_segmentDir, _tarDir);
+
+    ExecutorService executor = Executors.newCachedThreadPool();
+    ClusterIntegrationTestUtils.buildSegmentsFromAvro(avroFiles, 0, _segmentDir, _tarDir, tableName,
+        createStarTreeIndex, null, null, _schema, executor);
+    executor.shutdown();
+    executor.awaitTermination(10, TimeUnit.MINUTES);
+
+    uploadSegments(_tarDir);
+  }
+
+  @Test
+  public void testQueries() throws Exception {
+    String pqlQuery =
+        "Select count(*) from " + DEFAULT_TABLE_NAME + " WHERE headers matches('k1:\"value\\-1\"','')";
+    JsonNode pinotResponse = postQuery(pqlQuery);
+    System.out.println(pinotResponse);
+  }
+
+  @AfterClass
+  public void tearDown() throws Exception {
+    dropOfflineTable(DEFAULT_TABLE_NAME);
+
+    stopServer();
+    stopBroker();
+    stopController();
+    stopZk();
+
+    FileUtils.deleteDirectory(_tempDir);
+  }
+
+}
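
The test above only prints the broker response. A minimal assertion sketch, assuming
Pinot's standard PQL aggregation response shape ("aggregationResults"[0]."value" is an
assumption, not confirmed by this patch):

    long count = pinotResponse.get("aggregationResults").get(0).get("value").asLong();
    org.testng.Assert.assertTrue(count <= TOTAL_DOCS);
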
diff --git a/pinot-perf/pom.xml b/pinot-perf/pom.xml
index 68393be..dd795c1 100644
--- a/pinot-perf/pom.xml
+++ b/pinot-perf/pom.xml
@@ -19,7 +19,8 @@
     under the License.
 
 -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
@@ -101,6 +102,25 @@
       </exclusions>
     </dependency>
     <dependency>
+      <groupId>net.sf.jopt-simple</groupId>
+      <artifactId>jopt-simple</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-core</artifactId>
+      <version>7.6.0</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-queryparser</artifactId>
+      <version>7.6.0</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-analyzers-common</artifactId>
+      <version>7.6.0</version>
+    </dependency>
+    <dependency>
       <groupId>org.openjdk.jmh</groupId>
       <artifactId>jmh-generator-annprocess</artifactId>
       <version>1.15</version>
@@ -180,15 +200,20 @@
           <binFileExtensions>
             <unix>.sh</unix>
           </binFileExtensions>
-          <!-- Set the target configuration directory to be used in the bin scripts -->
+          <!-- Set the target configuration directory to be used in the bin 
+            scripts -->
           <configurationDirectory>conf</configurationDirectory>
-          <!-- Copy the contents from "/src/main/config" to the target configuration directory in the assembled application -->
+          <!-- Copy the contents from "/src/main/config" to the target configuration 
+            directory in the assembled application -->
           <copyConfigurationDirectory>false</copyConfigurationDirectory>
-          <!-- Include the target configuration directory in the beginning of the classpath declaration in the bin scripts -->
+          <!-- Include the target configuration directory in the beginning 
+            of the classpath declaration in the bin scripts -->
           <includeConfigurationDirectoryInClasspath>false</includeConfigurationDirectoryInClasspath>
           <assembleDirectory>${project.build.directory}/${project.artifactId}-pkg</assembleDirectory>
           <!-- Extra JVM arguments that will be included in the bin scripts -->
-          <extraJvmArguments>-Xms24G -Xmx24G -Dlog4j.configuration=log4j.properties</extraJvmArguments>
+          <extraJvmArguments>-Xms24G -Xmx24G
+            -Dlog4j.configuration=log4j.properties
+          </extraJvmArguments>
           <!-- Generate bin scripts for windows and unix pr default -->
           <platforms>
             <platform>unix</platform>
diff --git a/pinot-perf/src/main/java/org/apache/pinot/perf/LuceneBenchmark.java b/pinot-perf/src/main/java/org/apache/pinot/perf/LuceneBenchmark.java
new file mode 100644
index 0000000..2af2ed5
--- /dev/null
+++ b/pinot-perf/src/main/java/org/apache/pinot/perf/LuceneBenchmark.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.perf;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queryparser.classic.ParseException;
+import org.apache.lucene.queryparser.classic.QueryParser;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.pinot.core.segment.index.readers.LuceneInvertedIndexReader;
+import org.roaringbitmap.buffer.MutableRoaringBitmap;
+
+public class LuceneBenchmark {
+
+  public static void main(String[] args) throws IOException, ParseException {
+    Map<String, Analyzer> analyzerMap = new HashMap<>();
+    Analyzer wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer());
+    Directory index = new RAMDirectory();
+    IndexWriterConfig config = new IndexWriterConfig(wrapper);
+
+    try (IndexWriter writer = new IndexWriter(index, config)) {
+      for (int i = 0; i < 1000; i++) {
+        Document doc = new Document();
+        doc.add(new TextField("k1", "value-" + i, Field.Store.YES));
+        doc.add(new TextField("k2", "value-" + i, Field.Store.YES));
+        writer.addDocument(doc);
+      }
+    }
+    String querystr = "k1:\"value1?0\"";
+    Query q = new QueryParser("Content", wrapper).parse(querystr);
+    // The parsed query is immediately replaced by an explicit wildcard query for this run
+    q = new WildcardQuery(new Term("k1", QueryParser.escape("value1*")));
+
+    // 3. searching
+    IndexReader reader = DirectoryReader.open(index);
+    IndexSearcher searcher = new IndexSearcher(reader);
+    MutableRoaringBitmap bitmap = new MutableRoaringBitmap();
+    Collector collector = new LuceneInvertedIndexReader.LuceneResultCollector(bitmap);
+    searcher.search(q, collector);
+
+    // 4. display results
+    System.out.println("Query string: " + querystr);
+    System.out.println("Found " + bitmap.getCardinality() + " hits.");
+
+  }
+}
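
The benchmark collects Lucene hits straight into a RoaringBitmap of document ids rather
than materializing TopDocs. A small follow-up sketch for consuming the matched ids
(forEach with org.roaringbitmap.IntConsumer is part of the RoaringBitmap library already
on the classpath):

    bitmap.forEach((org.roaringbitmap.IntConsumer) docId -> System.out.println("matched docId = " + docId));
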
diff --git a/pinot-tools/src/main/java/org/apache/pinot/tools/perf/ZookeeperLauncher.java b/pinot-tools/src/main/java/org/apache/pinot/tools/perf/ZookeeperLauncher.java
index 838b6d1..c299257 100644
--- a/pinot-tools/src/main/java/org/apache/pinot/tools/perf/ZookeeperLauncher.java
+++ b/pinot-tools/src/main/java/org/apache/pinot/tools/perf/ZookeeperLauncher.java
@@ -36,7 +36,7 @@ public class ZookeeperLauncher {
   private ZkServer _zkServer;
 
   public ZookeeperLauncher() {
-    this("/tmp");
+    this(org.apache.commons.io.FileUtils.getTempDirectoryPath());
   }
 
   public ZookeeperLauncher(String baseTempDir) {




[incubator-pinot] 06/06: Fixing license header

Posted by xi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xiangfu pushed a commit to branch nested-object-indexing-1
in repository https://gitbox.apache.org/repos/asf/incubator-pinot.git

commit bdaa9c12938254819c09c05b7a589a24fd8c860d
Author: Xiang Fu <xi...@alt-chain.io>
AuthorDate: Mon Mar 18 22:27:55 2019 -0700

    Fixing license header
---
 ...VarByteSingleColumnSingleValueReaderWriterTest.java | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/pinot-core/src/test/java/org/apache/pinot/core/io/writer/impl/VarByteSingleColumnSingleValueReaderWriterTest.java b/pinot-core/src/test/java/org/apache/pinot/core/io/writer/impl/VarByteSingleColumnSingleValueReaderWriterTest.java
index 0fc33db..2f1f386 100644
--- a/pinot-core/src/test/java/org/apache/pinot/core/io/writer/impl/VarByteSingleColumnSingleValueReaderWriterTest.java
+++ b/pinot-core/src/test/java/org/apache/pinot/core/io/writer/impl/VarByteSingleColumnSingleValueReaderWriterTest.java
@@ -1,3 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
 package org.apache.pinot.core.io.writer.impl;
 
 import org.apache.pinot.core.io.readerwriter.PinotDataBufferMemoryManager;




[incubator-pinot] 05/06: Adding support for bytes type in realtime + nested object indexing

Posted by xi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xiangfu pushed a commit to branch nested-object-indexing-1
in repository https://gitbox.apache.org/repos/asf/incubator-pinot.git

commit a230c647551bf5de33646f58edeb690be2e54186
Author: kishore gopalakrishna <g....@gmail.com>
AuthorDate: Tue Feb 19 09:29:40 2019 -0800

    Adding support for bytes type in realtime + nested object indexing
---
 .../org/apache/pinot/common/data/FieldSpec.java    |   1 +
 .../org/apache/pinot/common/data/PinotObject.java  |   2 +-
 .../pinot/common/data/PinotObjectFactory.java      |  67 +++++++++
 .../org/apache/pinot/core/common/Predicate.java    |  28 +---
 .../realtime/LLRealtimeSegmentDataManager.java     |   3 +-
 .../indexsegment/mutable/MutableSegmentImpl.java   | 149 ++++++++++++++++-----
 ...VarByteSingleColumnSingleValueReaderWriter.java | 143 ++++++++++++++++++++
 .../core/realtime/impl/RealtimeSegmentConfig.java  |  18 ++-
 .../invertedindex/RealtimeInvertedIndexReader.java |  41 +++++-
 .../creator/impl/inv/LuceneIndexCreator.java       |  14 +-
 .../loader/invertedindex/InvertedIndexHandler.java |  29 +---
 .../index/readers/LuceneInvertedIndexReader.java   |   9 +-
 ...yteSingleColumnSingleValueReaderWriterTest.java |  31 +++++
 13 files changed, 433 insertions(+), 102 deletions(-)

diff --git a/pinot-common/src/main/java/org/apache/pinot/common/data/FieldSpec.java b/pinot-common/src/main/java/org/apache/pinot/common/data/FieldSpec.java
index ad5a5a3..cb43a4c 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/data/FieldSpec.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/data/FieldSpec.java
@@ -457,6 +457,7 @@ public abstract class FieldSpec implements Comparable<FieldSpec>, ConfigNodeLife
         case DOUBLE:
           return Double.BYTES;
         case BYTES:
+        case STRING:
           // TODO: Metric size is only used for Star-tree generation, which is not supported yet.
           return MetricFieldSpec.UNDEFINED_METRIC_SIZE;
         default:
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/data/PinotObject.java b/pinot-common/src/main/java/org/apache/pinot/common/data/PinotObject.java
index 3f1ca33..9b68f73 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/data/PinotObject.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/data/PinotObject.java
@@ -50,7 +50,7 @@ public interface PinotObject {
   List<String> getPropertyNames();
 
   /**
-   * @param fieldName
+   * @param propertyName
    * @return the value of the property, it can be a single object or a list of objects.
    */
   Object getProperty(String propertyName);
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/data/PinotObjectFactory.java b/pinot-common/src/main/java/org/apache/pinot/common/data/PinotObjectFactory.java
new file mode 100644
index 0000000..b15dc58
--- /dev/null
+++ b/pinot-common/src/main/java/org/apache/pinot/common/data/PinotObjectFactory.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.common.data;
+
+import java.util.List;
+import org.apache.pinot.common.data.objects.JSONObject;
+import org.apache.pinot.common.data.objects.MapObject;
+import org.apache.pinot.common.data.objects.TextObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * Factory class that create PinotObject from bytes
+ */
+public class PinotObjectFactory {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(PinotObjectFactory.class);
+
+  public static PinotObject create(FieldSpec spec, byte[] buf) {
+    return create(spec.getObjectType(), buf);
+  }
+
+  public static PinotObject create(String objectType, byte[] buf) {
+
+    Class<? extends PinotObject> pinotObjectClazz;
+    PinotObject pinotObject = null;
+    try {
+      switch (objectType.toUpperCase()) {
+        case "MAP":
+          pinotObjectClazz = MapObject.class;
+          break;
+        case "JSON":
+          pinotObjectClazz = JSONObject.class;
+          break;
+        case "TEXT":
+          pinotObjectClazz = TextObject.class;
+          break;
+        default:
+          // custom object type.
+          pinotObjectClazz = (Class<? extends PinotObject>) Class.forName(objectType);
+      }
+      pinotObject = pinotObjectClazz.getDeclaredConstructor().newInstance();
+      pinotObject.init(buf);
+    } catch (Exception e) {
+      LOGGER.error("Error pinot object  for type:{}. Skipping inverted index creation", objectType);
+    }
+    return pinotObject;
+  }
+}
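
A minimal usage sketch for the factory above (the JSON payload and property names are
illustrative only):

    byte[] buf = "{\"k1\":\"v1\"}".getBytes();
    PinotObject obj = PinotObjectFactory.create("JSON", buf);
    List<String> properties = obj.getPropertyNames(); // e.g. ["k1"]
    Object value = obj.getProperty("k1");             // e.g. "v1"
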
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/common/Predicate.java b/pinot-core/src/main/java/org/apache/pinot/core/common/Predicate.java
index 302dd0c..c7fa371 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/common/Predicate.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/common/Predicate.java
@@ -78,7 +78,6 @@ public abstract class Predicate {
 
     Predicate predicate;
     switch (filterType) {
-<<<<<<< HEAD
       case EQUALITY:
         predicate = new EqPredicate(column, value);
         break;
@@ -97,36 +96,11 @@ public abstract class Predicate {
       case IN:
         predicate = new InPredicate(column, value);
         break;
-      case TEXT_MATCH:
+      case MATCHES:
         predicate = new MatchesPredicate(column, value);
         break;
       default:
         throw new UnsupportedOperationException("Unsupported filterType:" + filterType);
-=======
-    case EQUALITY:
-      predicate = new EqPredicate(column, value);
-      break;
-    case RANGE:
-      predicate = new RangePredicate(column, value);
-      break;
-    case REGEXP_LIKE:
-      predicate = new RegexpLikePredicate(column, value);
-      break;
-    case NOT:
-      predicate = new NEqPredicate(column, value);
-      break;
-    case NOT_IN:
-      predicate = new NotInPredicate(column, value);
-      break;
-    case IN:
-      predicate = new InPredicate(column, value);
-      break;
-    case MATCHES:
-      predicate = new MatchesPredicate(column, value);
-      break;
-    default:
-      throw new UnsupportedOperationException("Unsupported filterType:" + filterType);
->>>>>>> Enhancing PQL to support MATCHES predicate, can be used for searching within text, map, json and other complex objects
     }
     return predicate;
   }
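
For reference, a hedged sketch of constructing the predicate the MATCHES branch above
produces; the two-element value list ([luceneQuery, options]) is an assumption inferred
from the PQL form matches('<query>', '<options>'), not confirmed by this patch:

    // Hypothetical: value layout assumed, not shown in this diff
    Predicate p = new MatchesPredicate("headers", java.util.Arrays.asList("k1:\"value1\"", ""));
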
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/data/manager/realtime/LLRealtimeSegmentDataManager.java b/pinot-core/src/main/java/org/apache/pinot/core/data/manager/realtime/LLRealtimeSegmentDataManager.java
index 18b1409..683bfec 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/data/manager/realtime/LLRealtimeSegmentDataManager.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/data/manager/realtime/LLRealtimeSegmentDataManager.java
@@ -1067,7 +1067,8 @@ public class LLRealtimeSegmentDataManager extends RealtimeSegmentDataManager {
             .setInvertedIndexColumns(invertedIndexColumns).setRealtimeSegmentZKMetadata(segmentZKMetadata)
             .setOffHeap(_isOffHeap).setMemoryManager(_memoryManager)
             .setStatsHistory(realtimeTableDataManager.getStatsHistory())
-            .setAggregateMetrics(indexingConfig.isAggregateMetrics());
+            .setAggregateMetrics(indexingConfig.isAggregateMetrics())
+            .setConsumerDir(realtimeTableDataManager.getConsumerDir());
 
     // Create message decoder
     _messageDecoder = StreamDecoderProvider.create(_partitionLevelStreamConfig, _schema);
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/mutable/MutableSegmentImpl.java b/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/mutable/MutableSegmentImpl.java
index 1b554e8..6d1170f 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/mutable/MutableSegmentImpl.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/indexsegment/mutable/MutableSegmentImpl.java
@@ -28,15 +28,21 @@ import java.util.Map;
 import java.util.Set;
 import org.apache.pinot.common.config.SegmentPartitionConfig;
 import org.apache.pinot.common.data.FieldSpec;
+import org.apache.pinot.common.data.PinotObject;
+import org.apache.pinot.common.data.PinotObjectFactory;
 import org.apache.pinot.common.data.Schema;
 import org.apache.pinot.common.segment.SegmentMetadata;
 import org.apache.pinot.common.utils.NetUtil;
 import org.apache.pinot.core.data.GenericRow;
 import org.apache.pinot.core.indexsegment.IndexSegmentUtils;
 import org.apache.pinot.core.io.reader.DataFileReader;
+import org.apache.pinot.core.io.readerwriter.BaseSingleColumnMultiValueReaderWriter;
+import org.apache.pinot.core.io.readerwriter.BaseSingleColumnSingleValueReaderWriter;
+import org.apache.pinot.core.io.readerwriter.BaseSingleValueMultiColumnReaderWriter;
 import org.apache.pinot.core.io.readerwriter.PinotDataBufferMemoryManager;
 import org.apache.pinot.core.io.readerwriter.impl.FixedByteSingleColumnMultiValueReaderWriter;
 import org.apache.pinot.core.io.readerwriter.impl.FixedByteSingleColumnSingleValueReaderWriter;
+import org.apache.pinot.core.io.readerwriter.impl.VarByteSingleColumnSingleValueReaderWriter;
 import org.apache.pinot.core.realtime.impl.RealtimeSegmentConfig;
 import org.apache.pinot.core.realtime.impl.RealtimeSegmentStatsHistory;
 import org.apache.pinot.core.realtime.impl.dictionary.MutableDictionary;
@@ -85,6 +91,9 @@ public class MutableSegmentImpl implements MutableSegment {
   private final Map<String, RealtimeInvertedIndexReader> _invertedIndexMap = new HashMap<>();
   private final Map<String, BloomFilterReader> _bloomFilterMap = new HashMap<>();
   private final IdMap<FixedIntArray> _recordIdMap;
+  private final Set<String> _noDictionaryColumns;
+  private final Set<String> _invertedIndexColumns;
+
   private boolean _aggregateMetrics;
 
   private volatile int _numDocsIndexed = 0;
@@ -120,9 +129,9 @@ public class MutableSegmentImpl implements MutableSegment {
     _logger =
         LoggerFactory.getLogger(MutableSegmentImpl.class.getName() + "_" + _segmentName + "_" + config.getStreamName());
 
-    Set<String> noDictionaryColumns = config.getNoDictionaryColumns();
+    _noDictionaryColumns = config.getNoDictionaryColumns();
 
-    Set<String> invertedIndexColumns = config.getInvertedIndexColumns();
+    _invertedIndexColumns = config.getInvertedIndexColumns();
     int avgNumMultiValues = config.getAvgNumMultiValues();
 
     // Initialize for each column
@@ -134,52 +143,85 @@ public class MutableSegmentImpl implements MutableSegment {
       // Only support generating raw index on single-value non-string columns that do not have inverted index while
       // consuming. After consumption completes and the segment is built, all single-value columns can have raw index
       FieldSpec.DataType dataType = fieldSpec.getDataType();
-      int indexColumnSize = FieldSpec.DataType.INT.size();
-      if (noDictionaryColumns.contains(column) && fieldSpec.isSingleValueField()
-          && dataType != FieldSpec.DataType.STRING && !invertedIndexColumns.contains(column)) {
-        // No dictionary
-        indexColumnSize = dataType.size();
-      } else {
+      DataFileReader indexReaderWriter;
+      boolean createDictionary = shouldCreateDictionary(fieldSpec);
+      if (createDictionary) {
         int dictionaryColumnSize;
         if (dataType == FieldSpec.DataType.STRING) {
           dictionaryColumnSize = _statsHistory.getEstimatedAvgColSize(column);
         } else {
           dictionaryColumnSize = dataType.size();
         }
-        String allocationContext = buildAllocationContext(_segmentName, column, V1Constants.Dict.FILE_EXTENSION);
+        String dictAllocationContext = buildAllocationContext(_segmentName, column, V1Constants.Dict.FILE_EXTENSION);
         MutableDictionary dictionary = MutableDictionaryFactory
             .getMutableDictionary(dataType, _offHeap, _memoryManager, dictionaryColumnSize,
-                Math.min(_statsHistory.getEstimatedCardinality(column), _capacity), allocationContext);
+                Math.min(_statsHistory.getEstimatedCardinality(column), _capacity), dictAllocationContext);
         _dictionaryMap.put(column, dictionary);
 
         // Even though the column is defined as 'no-dictionary' in the config, we did create dictionary for consuming segment.
-        noDictionaryColumns.remove(column);
+        _noDictionaryColumns.remove(column);
       }
 
-      DataFileReader indexReaderWriter;
-      if (fieldSpec.isSingleValueField()) {
-        String allocationContext =
-            buildAllocationContext(_segmentName, column, V1Constants.Indexes.UNSORTED_SV_FORWARD_INDEX_FILE_EXTENSION);
-        indexReaderWriter = new FixedByteSingleColumnSingleValueReaderWriter(_capacity, indexColumnSize, _memoryManager,
-            allocationContext);
+      int fwdIndexColumnSize;
+      if (createDictionary) {
+        //dictionary will always be int
+        fwdIndexColumnSize = FieldSpec.DataType.INT.size();
+      } else {
+        fwdIndexColumnSize = fieldSpec.getDataType().size();
+      }
+      //The size is FIXED for primitive data types (INT, LONG, FLOAT, DOUBLE)
+      if (fwdIndexColumnSize > 0) {
+        if (fieldSpec.isSingleValueField()) {
+          String svAllocationContext = buildAllocationContext(_segmentName, column,
+              V1Constants.Indexes.UNSORTED_SV_FORWARD_INDEX_FILE_EXTENSION);
+          indexReaderWriter =
+              new FixedByteSingleColumnSingleValueReaderWriter(_capacity, fwdIndexColumnSize, _memoryManager,
+                  svAllocationContext);
+        } else {
+          // TODO: Fix the bug in MultiValueReaderWriter
+          String mvAllocationContext = buildAllocationContext(_segmentName, column,
+              V1Constants.Indexes.UNSORTED_MV_FORWARD_INDEX_FILE_EXTENSION);
+          indexReaderWriter =
+              new FixedByteSingleColumnMultiValueReaderWriter(MAX_MULTI_VALUES_PER_ROW, avgNumMultiValues, _capacity,
+                  fwdIndexColumnSize, _memoryManager, mvAllocationContext);
+        }
       } else {
-        // TODO: Start with a smaller capacity on FixedByteSingleColumnMultiValueReaderWriter and let it expand
-        String allocationContext =
-            buildAllocationContext(_segmentName, column, V1Constants.Indexes.UNSORTED_MV_FORWARD_INDEX_FILE_EXTENSION);
-        indexReaderWriter =
-            new FixedByteSingleColumnMultiValueReaderWriter(MAX_MULTI_VALUES_PER_ROW, avgNumMultiValues, _capacity,
-                indexColumnSize, _memoryManager, allocationContext);
+        //TODO: Get it from stats
+        int avgColumnSizeInBytes = -1;
+        //For STRING and BYTES, use varByte implementation
+        if (fieldSpec.isSingleValueField()) {
+          String allocationContext = buildAllocationContext(_segmentName, column,
+              V1Constants.Indexes.UNSORTED_SV_FORWARD_INDEX_FILE_EXTENSION);
+          indexReaderWriter =
+              new VarByteSingleColumnSingleValueReaderWriter(_capacity, avgColumnSizeInBytes, _memoryManager,
+                  allocationContext);
+        } else {
+          // TODO: Fix the bug in MultiValueReaderWriter
+          String mvAllocationContext = buildAllocationContext(_segmentName, column,
+              V1Constants.Indexes.UNSORTED_MV_FORWARD_INDEX_FILE_EXTENSION);
+          indexReaderWriter =
+              new FixedByteSingleColumnMultiValueReaderWriter(MAX_MULTI_VALUES_PER_ROW, avgNumMultiValues, _capacity,
+                  avgColumnSizeInBytes, _memoryManager, mvAllocationContext);
+        }
       }
       _indexReaderWriterMap.put(column, indexReaderWriter);
 
-      if (invertedIndexColumns.contains(column)) {
-        _invertedIndexMap.put(column, new RealtimeInvertedIndexReader());
+      if (_invertedIndexColumns.contains(column)) {
+        _invertedIndexMap.put(column, new RealtimeInvertedIndexReader(fieldSpec, config.getConsumerDir()));
       }
     }
 
     // Metric aggregation can be enabled only if config is specified, and all dimensions have dictionary,
     // and no metrics have dictionary. If not enabled, the map returned is null.
-    _recordIdMap = enableMetricsAggregationIfPossible(config, _schema, noDictionaryColumns);
+    _recordIdMap = enableMetricsAggregationIfPossible(config, _schema, _noDictionaryColumns);
+  }
+
+  private boolean shouldCreateDictionary(FieldSpec fieldSpec) {
+    //create a dictionary unless the user explicitly disabled it for this column
+    return !_noDictionaryColumns.contains(fieldSpec.getName());
   }
 
   public SegmentPartitionConfig getSegmentPartitionConfig() {
@@ -209,7 +251,7 @@ public class MutableSegmentImpl implements MutableSegment {
     if (docId == numDocs) {
       // Add forward and inverted indices for new document.
       addForwardIndex(row, docId, dictIdMap);
-      addInvertedIndex(docId, dictIdMap);
+      addInvertedIndex(row, docId, dictIdMap);
       // Update number of document indexed at last to make the latest record queryable
       return _numDocsIndexed++ < _capacity;
     } else {
@@ -270,8 +312,8 @@ public class MutableSegmentImpl implements MutableSegment {
       String column = fieldSpec.getName();
       Object value = row.getValue(column);
       if (fieldSpec.isSingleValueField()) {
-        FixedByteSingleColumnSingleValueReaderWriter indexReaderWriter =
-            (FixedByteSingleColumnSingleValueReaderWriter) _indexReaderWriterMap.get(column);
+        BaseSingleColumnSingleValueReaderWriter indexReaderWriter =
+            (BaseSingleColumnSingleValueReaderWriter) _indexReaderWriterMap.get(column);
         Integer dictId = (Integer) dictIdMap.get(column);
         if (dictId != null) {
           // Column with dictionary
@@ -292,6 +334,12 @@ public class MutableSegmentImpl implements MutableSegment {
             case DOUBLE:
               indexReaderWriter.setDouble(docId, (Double) value);
               break;
+            case STRING:
+              indexReaderWriter.setString(docId, (String) value);
+              break;
+            case BYTES:
+              indexReaderWriter.setBytes(docId, (byte[]) value);
+              break;
             default:
               throw new UnsupportedOperationException(
                   "Unsupported data type: " + dataType + " for no-dictionary column: " + column);
@@ -299,19 +347,56 @@ public class MutableSegmentImpl implements MutableSegment {
         }
       } else {
         int[] dictIds = (int[]) dictIdMap.get(column);
-        ((FixedByteSingleColumnMultiValueReaderWriter) _indexReaderWriterMap.get(column)).setIntArray(docId, dictIds);
+        if (dictIds != null) {
+          ((FixedByteSingleColumnMultiValueReaderWriter) _indexReaderWriterMap.get(column)).setIntArray(docId, dictIds);
+        } else {
+          BaseSingleColumnMultiValueReaderWriter indexReaderWriter =
+              (BaseSingleColumnMultiValueReaderWriter) _indexReaderWriterMap.get(column);
+          // No-dictionary column
+          FieldSpec.DataType dataType = fieldSpec.getDataType();
+          switch (dataType) {
+            case INT:
+              indexReaderWriter.setIntArray(docId, (int[]) value);
+              break;
+            case LONG:
+              indexReaderWriter.setLongArray(docId, (long[]) value);
+              break;
+            case FLOAT:
+              indexReaderWriter.setFloatArray(docId, (float[]) value);
+              break;
+            case DOUBLE:
+              indexReaderWriter.setDoubleArray(docId, (double[]) value);
+              break;
+            case STRING:
+              indexReaderWriter.setStringArray(docId, (String[]) value);
+              break;
+            case BYTES:
+              indexReaderWriter.setBytesArray(docId, (byte[][]) value);
+              break;
+            default:
+              throw new UnsupportedOperationException(
+                  "Unsupported data type: " + dataType + " for no-dictionary column: " + column);
+          }
+        }
       }
     }
   }
 
-  private void addInvertedIndex(int docId, Map<String, Object> dictIdMap) {
+  private void addInvertedIndex(GenericRow row, int docId, Map<String, Object> dictIdMap) {
     // Update inverted index at last
     // NOTE: inverted index have to be updated at last because once it gets updated, the latest record will become
     // queryable
     for (FieldSpec fieldSpec : _schema.getAllFieldSpecs()) {
       String column = fieldSpec.getName();
       RealtimeInvertedIndexReader invertedIndex = _invertedIndexMap.get(column);
-      if (invertedIndex != null) {
+      if (invertedIndex == null) {
+        continue;
+      }
+      if (fieldSpec.getObjectType() != null) {
+        byte[] value = (byte[]) row.getValue(column);
+        PinotObject pinotObject = PinotObjectFactory.create(fieldSpec, value);
+        invertedIndex.add(docId, pinotObject);
+      } else {
         if (fieldSpec.isSingleValueField()) {
           invertedIndex.add(((Integer) dictIdMap.get(column)), docId);
         } else {
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/io/readerwriter/impl/VarByteSingleColumnSingleValueReaderWriter.java b/pinot-core/src/main/java/org/apache/pinot/core/io/readerwriter/impl/VarByteSingleColumnSingleValueReaderWriter.java
new file mode 100644
index 0000000..687391c
--- /dev/null
+++ b/pinot-core/src/main/java/org/apache/pinot/core/io/readerwriter/impl/VarByteSingleColumnSingleValueReaderWriter.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.core.io.readerwriter.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.pinot.common.utils.StringUtil;
+import org.apache.pinot.core.io.readerwriter.BaseSingleColumnSingleValueReaderWriter;
+import org.apache.pinot.core.io.readerwriter.PinotDataBufferMemoryManager;
+import org.apache.pinot.core.io.writer.impl.DirectMemoryManager;
+import org.apache.pinot.core.segment.creator.impl.V1Constants;
+import org.apache.pinot.core.segment.memory.PinotDataBuffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class VarByteSingleColumnSingleValueReaderWriter extends BaseSingleColumnSingleValueReaderWriter {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(VarByteSingleColumnSingleValueReaderWriter.class);
+
+  private static final int DEFAULT_COLUMN_SIZE_IN_BYTES = 100;
+  private final PinotDataBufferMemoryManager _memoryManager;
+  private final String _allocationContext;
+
+  private FixedByteSingleValueMultiColumnReaderWriter _headerReaderWriter;
+  private List<PinotDataBuffer> _dataBuffers;
+
+  //capacity of the chunk, it can adjust itself based on the avgSize
+  private long _chunkCapacityInBytes = 0;
+  private final int _numRowsPerChunk;
+  private int _avgColumnSizeInBytes;
+
+  //amount of data written to the current buffer
+  private int _currentDataSize = 0;
+  //number of rows written to the current buffer
+  private int _currentBufferRows = 0;
+  //PinotDataBuffer where the actual bytes are written to
+  private PinotDataBuffer _currentDataBuffer;
+  //index pointing to the element in _dataBuffers
+  private int _currentDataBufferId;
+
+  /**
+   * @param numRowsPerChunk Number of rows to pack in one chunk before a new chunk is created.
+   * @param avgColumnSizeInBytes Average size of a column value in bytes. Set this to -1 if it is unknown.
+   * @param memoryManager Memory manager to be used for allocating memory.
+   * @param allocationContext Context string used when allocating buffers.
+   */
+  public VarByteSingleColumnSingleValueReaderWriter(int numRowsPerChunk, int avgColumnSizeInBytes,
+      PinotDataBufferMemoryManager memoryManager, String allocationContext) {
+    _avgColumnSizeInBytes = avgColumnSizeInBytes;
+    if (avgColumnSizeInBytes < 0) {
+      _avgColumnSizeInBytes = DEFAULT_COLUMN_SIZE_IN_BYTES;
+    }
+    _numRowsPerChunk = numRowsPerChunk;
+    _memoryManager = memoryManager;
+    _allocationContext = allocationContext;
+    _dataBuffers = new ArrayList<>();
+    //bufferId, Offset, length for each row
+    //we can eliminate the length as an optimization later
+    _headerReaderWriter =
+        new FixedByteSingleValueMultiColumnReaderWriter(_numRowsPerChunk, new int[]{4, 4, 4}, _memoryManager,
+            _allocationContext);
+  }
+
+  @Override
+  public void close()
+      throws IOException {
+    for (PinotDataBuffer buffer : _dataBuffers) {
+      buffer.close();
+    }
+  }
+
+  @Override
+  public void setString(int row, String value) {
+    setBytes(row, StringUtil.encodeUtf8(value));
+  }
+
+  @Override
+  public String getString(int row) {
+    return StringUtil.decodeUtf8(getBytes(row));
+  }
+
+  @Override
+  public void setBytes(int row, byte[] buf) {
+    if (_currentDataSize + buf.length >= _chunkCapacityInBytes) {
+      addDataBuffer();
+    }
+    //header stores (bufferId, startOffset, length) for each row
+    _headerReaderWriter.setInt(row, 0, _currentDataBufferId);
+    _headerReaderWriter.setInt(row, 1, _currentDataSize);
+    _headerReaderWriter.setInt(row, 2, buf.length);
+    _currentDataBuffer.readFrom(_currentDataSize, buf);
+    _currentDataSize = _currentDataSize + buf.length;
+    _currentBufferRows++;
+  }
+
+  @Override
+  public byte[] getBytes(int row) {
+    int dataBufferId = _headerReaderWriter.getInt(row, 0);
+    int startOffset = _headerReaderWriter.getInt(row, 1);
+    int length = _headerReaderWriter.getInt(row, 2);
+    byte[] buf = new byte[length];
+    PinotDataBuffer dataBuffer = _dataBuffers.get(dataBufferId);
+    dataBuffer.copyTo(startOffset, buf);
+    return buf;
+  }
+
+  private void addDataBuffer() {
+    //adjust the average column size based on the data seen so far
+    if (_currentDataSize > 0 && _currentBufferRows > 0) {
+      _avgColumnSizeInBytes = _currentDataSize / _currentBufferRows;
+    }
+    _chunkCapacityInBytes = _numRowsPerChunk * _avgColumnSizeInBytes;
+    LOGGER.info("Allocating bytes for: {}, dataBufferSize: {}", _allocationContext, _chunkCapacityInBytes);
+    PinotDataBuffer dataBuffer = _memoryManager.allocate(_chunkCapacityInBytes, _allocationContext);
+    _dataBuffers.add(dataBuffer);
+    _currentDataBuffer = dataBuffer;
+    _currentDataBufferId = _dataBuffers.size() - 1;
+    //reset the per-buffer counters for the new buffer
+    _currentDataSize = 0;
+    _currentBufferRows = 0;
+  }
+}
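
A minimal round-trip sketch for this reader/writer, using the DirectMemoryManager the
class already imports (exception handling elided; -1 lets the average column size
default):

    PinotDataBufferMemoryManager memoryManager = new DirectMemoryManager("example");
    VarByteSingleColumnSingleValueReaderWriter readerWriter =
        new VarByteSingleColumnSingleValueReaderWriter(1000, -1, memoryManager, "example");
    readerWriter.setString(0, "hello");
    String value = readerWriter.getString(0); // "hello"
    readerWriter.close();
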
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/realtime/impl/RealtimeSegmentConfig.java b/pinot-core/src/main/java/org/apache/pinot/core/realtime/impl/RealtimeSegmentConfig.java
index 63ebbbb..6388617 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/realtime/impl/RealtimeSegmentConfig.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/realtime/impl/RealtimeSegmentConfig.java
@@ -39,12 +39,13 @@ public class RealtimeSegmentConfig {
   private final RealtimeSegmentStatsHistory _statsHistory;
   private final SegmentPartitionConfig _segmentPartitionConfig;
   private final boolean _aggregateMetrics;
+  private final String _consumerDir;
 
   private RealtimeSegmentConfig(String segmentName, String streamName, Schema schema, int capacity,
       int avgNumMultiValues, Set<String> noDictionaryColumns, Set<String> invertedIndexColumns,
       RealtimeSegmentZKMetadata realtimeSegmentZKMetadata, boolean offHeap, PinotDataBufferMemoryManager memoryManager,
       RealtimeSegmentStatsHistory statsHistory, SegmentPartitionConfig segmentPartitionConfig,
-      boolean aggregateMetrics) {
+      boolean aggregateMetrics, String consumerDir) {
     _segmentName = segmentName;
     _streamName = streamName;
     _schema = schema;
@@ -58,6 +59,7 @@ public class RealtimeSegmentConfig {
     _statsHistory = statsHistory;
     _segmentPartitionConfig = segmentPartitionConfig;
     _aggregateMetrics = aggregateMetrics;
+    _consumerDir = consumerDir;
   }
 
   public String getSegmentName() {
@@ -112,6 +114,10 @@ public class RealtimeSegmentConfig {
     return _aggregateMetrics;
   }
 
+  public String getConsumerDir() {
+    return _consumerDir;
+  }
+
   public static class Builder {
     private String _segmentName;
     private String _streamName;
@@ -126,6 +132,7 @@ public class RealtimeSegmentConfig {
     private RealtimeSegmentStatsHistory _statsHistory;
     private SegmentPartitionConfig _segmentPartitionConfig;
     private boolean _aggregateMetrics = false;
+    private String _consumerDir;
 
     public Builder() {
     }
@@ -195,10 +202,17 @@ public class RealtimeSegmentConfig {
       return this;
     }
 
+    public Builder setConsumerDir(String consumerDir) {
+      _consumerDir = consumerDir;
+      return this;
+    }
+
     public RealtimeSegmentConfig build() {
       return new RealtimeSegmentConfig(_segmentName, _streamName, _schema, _capacity, _avgNumMultiValues,
           _noDictionaryColumns, _invertedIndexColumns, _realtimeSegmentZKMetadata, _offHeap, _memoryManager,
-          _statsHistory, _segmentPartitionConfig, _aggregateMetrics);
+          _statsHistory, _segmentPartitionConfig, _aggregateMetrics, _consumerDir);
     }
   }
 }
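
A short sketch of wiring the new consumer directory through the builder; only setters
visible in this patch are used, and all values are placeholders:

    RealtimeSegmentConfig config = new RealtimeSegmentConfig.Builder()
        .setInvertedIndexColumns(invertedIndexColumns)
        .setOffHeap(true)
        .setAggregateMetrics(false)
        .setConsumerDir("/var/pinot/server/consumerDir")
        .build();
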
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/realtime/impl/invertedindex/RealtimeInvertedIndexReader.java b/pinot-core/src/main/java/org/apache/pinot/core/realtime/impl/invertedindex/RealtimeInvertedIndexReader.java
index 4c45850..484eeb3 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/realtime/impl/invertedindex/RealtimeInvertedIndexReader.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/realtime/impl/invertedindex/RealtimeInvertedIndexReader.java
@@ -18,24 +18,56 @@
  */
 package org.apache.pinot.core.realtime.impl.invertedindex;
 
+import java.io.File;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
-
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.pinot.common.data.FieldSpec;
+import org.apache.pinot.common.data.PinotObject;
 import org.apache.pinot.core.common.Predicate;
+import org.apache.pinot.core.segment.creator.impl.V1Constants;
+import org.apache.pinot.core.segment.creator.impl.inv.LuceneIndexCreator;
 import org.apache.pinot.core.segment.index.readers.InvertedIndexReader;
+import org.apache.pinot.core.segment.index.readers.LuceneInvertedIndexReader;
 import org.roaringbitmap.buffer.MutableRoaringBitmap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 public class RealtimeInvertedIndexReader implements InvertedIndexReader<MutableRoaringBitmap> {
+  private static final Logger LOGGER = LoggerFactory.getLogger(RealtimeInvertedIndexReader.class);
+
   private final List<ThreadSafeMutableRoaringBitmap> _bitmaps = new ArrayList<>();
   private final ReentrantReadWriteLock.ReadLock _readLock;
   private final ReentrantReadWriteLock.WriteLock _writeLock;
+  private LuceneInvertedIndexReader _reader;
+  private LuceneIndexCreator _creator;
+  private boolean _isLuceneInitialized;
 
-  public RealtimeInvertedIndexReader() {
+  public RealtimeInvertedIndexReader(FieldSpec spec, String indexDir) {
     ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
     _readLock = readWriteLock.readLock();
     _writeLock = readWriteLock.writeLock();
+    if (spec.getObjectType() != null) {
+      try {
+        File outputDirectory = new File(indexDir, spec.getName() + V1Constants.Indexes.LUCENE_INVERTED_INDEX_DIR);
+        _creator = new LuceneIndexCreator(spec.getObjectType(), outputDirectory);
+        _reader = new LuceneInvertedIndexReader(DirectoryReader.open(_creator.getIndexDirectory()));
+        LOGGER.info("Initializing Lucene for column:{}", spec.getName());
+        _isLuceneInitialized = true;
+      } catch (IOException e) {
+        LOGGER.error("Error initializing Lucene for column:{}", spec.getName(), e);
+      }
+    }
+  }
+
+  /**
+   * Index the given PinotObject for the given document id.
+   */
+  public void add(int docId, PinotObject pinotObject) {
+    _creator.add(pinotObject);
   }
 
   /**
@@ -56,11 +88,12 @@ public class RealtimeInvertedIndexReader implements InvertedIndexReader<MutableR
       _bitmaps.get(dictId).checkAndAdd(docId);
     }
   }
-  
+
   @Override
   public MutableRoaringBitmap getDocIds(Predicate predicate) {
-    throw new UnsupportedOperationException("Predicate:"+ predicate + " is not supported");
+    if (!_isLuceneInitialized) {
+      throw new UnsupportedOperationException("Predicate: " + predicate + " is not supported without an object type");
+    }
+    return _reader.getDocIds(predicate);
   }
+
   @Override
   public MutableRoaringBitmap getDocIds(int dictId) {
     ThreadSafeMutableRoaringBitmap bitmap;
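
A hedged sketch of the new object-indexing path above (field spec, consumer directory,
docId, and payload are placeholders):

    RealtimeInvertedIndexReader index = new RealtimeInvertedIndexReader(jsonFieldSpec, consumerDir);
    PinotObject obj = PinotObjectFactory.create("JSON", "{\"k1\":\"v1\"}".getBytes());
    index.add(docId, obj); // indexed via the embedded LuceneIndexCreator
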
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/LuceneIndexCreator.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/LuceneIndexCreator.java
index 6622b57..87f9f56 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/LuceneIndexCreator.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/inv/LuceneIndexCreator.java
@@ -52,17 +52,16 @@ public class LuceneIndexCreator implements InvertedIndexCreator {
   private final IndexWriter _writer;
   private final IndexWriterConfig _indexWriterConfig;
   private final Directory _indexDirectory;
-  // TODO:Figure out a way to avoid this
-  boolean _isText = false;
+  private String _objectType;
 
-  public LuceneIndexCreator(ColumnMetadata columnMetadata, File outputDirectory) {
+  public LuceneIndexCreator(String objectType, File outputDirectory) {
+    _objectType = objectType;
     // TODO: Get IndexConfig and set the different analyzer for each field by default we set
     // StandardAnalyzer and use TextField. This can be expensive and inefficient if all we need is
     // exact match. See keyword analyzer
     _analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer());
     _indexWriterConfig = new IndexWriterConfig(_analyzer);
     _indexWriterConfig.setRAMBufferSizeMB(MAX_BUFFER_SIZE_MB);
-    _isText = "TEXT".equalsIgnoreCase(columnMetadata.getObjectType());
     try {
       _indexDirectory = FSDirectory.open(outputDirectory.toPath());
       _writer = new IndexWriter(_indexDirectory, _indexWriterConfig);
@@ -72,6 +71,10 @@ public class LuceneIndexCreator implements InvertedIndexCreator {
     }
   }
 
+  public Directory getIndexDirectory() {
+    return _indexDirectory;
+  }
+
   @Override
   public void add(int dictId) {
     throw new UnsupportedOperationException(
@@ -91,9 +94,6 @@ public class LuceneIndexCreator implements InvertedIndexCreator {
     List<String> propertyNames = object.getPropertyNames();
     for (String propertyName : propertyNames) {
       Field field;
-      // TODO: Figure out a way to avoid special casing Text, have a way to get propertyType from
-      // pinotObject?
-      // TODO: Handle list field
       Object value = object.getProperty(propertyName);
       if (value.getClass().isAssignableFrom(List.class)) {
         List<?> list = (List<?>) value;
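
The offline path drives the same creator directly, matching how InvertedIndexHandler
uses it in the next diff; a condensed sketch (directory name and payload are
placeholders):

    try (LuceneIndexCreator creator = new LuceneIndexCreator("JSON", new File(segmentDir, "headers.lucene.index"))) {
      creator.add(PinotObjectFactory.create("JSON", bytes));
    }
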
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/loader/invertedindex/InvertedIndexHandler.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/loader/invertedindex/InvertedIndexHandler.java
index b9a2bec..d0e688d 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/loader/invertedindex/InvertedIndexHandler.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/loader/invertedindex/InvertedIndexHandler.java
@@ -26,6 +26,7 @@ import java.util.Set;
 import javax.annotation.Nonnull;
 import org.apache.commons.io.FileUtils;
 import org.apache.pinot.common.data.PinotObject;
+import org.apache.pinot.common.data.PinotObjectFactory;
 import org.apache.pinot.common.data.objects.JSONObject;
 import org.apache.pinot.common.data.objects.MapObject;
 import org.apache.pinot.common.data.objects.TextObject;
@@ -117,38 +118,16 @@ public class InvertedIndexHandler {
     LOGGER.info("Creating new lucene based inverted index for segment: {}, column: {}", _segmentName, column);
     int numDocs = columnMetadata.getTotalDocs();
     String objectType = columnMetadata.getObjectType();
-    Class<? extends PinotObject> pinotObjectClazz;
-    PinotObject pinotObject = null;
-    try {
-      switch (objectType.toUpperCase()) {
-        case "MAP":
-          pinotObjectClazz = MapObject.class;
-          break;
-        case "JSON":
-          pinotObjectClazz = JSONObject.class;
-          break;
-        case "TEXT":
-          pinotObjectClazz = TextObject.class;
-          break;
-        default:
-          // custom object type.
-          pinotObjectClazz = (Class<? extends PinotObject>) Class.forName(objectType);
-      }
-      pinotObject = pinotObjectClazz.getConstructor(new Class[]{}).newInstance(new Object[]{});
-    } catch (Exception e) {
-      LOGGER.error("Error pinot object  for type:{}. Skipping inverted index creation", objectType);
-      return;
-    }
 
-    try (LuceneIndexCreator luceneIndexCreator = new LuceneIndexCreator(columnMetadata, invertedIndexDir)) {
+
+    try (LuceneIndexCreator luceneIndexCreator = new LuceneIndexCreator(objectType, invertedIndexDir)) {
       try (DataFileReader fwdIndex = getForwardIndexReader(columnMetadata, _segmentWriter)) {
         if (columnMetadata.isSingleValue()) {
           // Single-value column.
           VarByteChunkSingleValueReader svFwdIndex = (VarByteChunkSingleValueReader) fwdIndex;
           for (int i = 0; i < numDocs; i++) {
             byte[] bytes = svFwdIndex.getBytes(i);
-
-            pinotObject.init(bytes);
+            PinotObject pinotObject = PinotObjectFactory.create(objectType, bytes);
             luceneIndexCreator.add(pinotObject);
           }
         } else {
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/LuceneInvertedIndexReader.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/LuceneInvertedIndexReader.java
index 576bcc4..e4b8850 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/LuceneInvertedIndexReader.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/readers/LuceneInvertedIndexReader.java
@@ -65,8 +65,7 @@ public class LuceneInvertedIndexReader implements InvertedIndexReader<MutableRoa
   /**
    * TODO: Change this to take PinotDataBuffer, work around for now since Lucene needs actual
    * directory
-   * @param metadata
-   * @param indexDir
+   * @param segmentIndexDir
    * @param metadata
    */
   public LuceneInvertedIndexReader(File segmentIndexDir, ColumnMetadata metadata) {
@@ -83,6 +82,10 @@ public class LuceneInvertedIndexReader implements InvertedIndexReader<MutableRoa
     }
   }
 
+  public LuceneInvertedIndexReader(IndexReader reader) {
+    _searcher = new IndexSearcher(reader);
+  }
+
   @Override
   public void close() throws IOException {
     _index.close();
@@ -104,7 +107,7 @@ public class LuceneInvertedIndexReader implements InvertedIndexReader<MutableRoa
 
   public MutableRoaringBitmap getDocIds(String queryStr, String options) {
     QueryParser queryParser = new QueryParser(TextObject.DEFAULT_FIELD, _analyzer);
-    Query query = null;
+    Query query;
     try {
       query = queryParser.parse(queryStr);
     } catch (ParseException e) {
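
A usage sketch for the reader, given a LuceneInvertedIndexReader named reader; the query
string follows Lucene's classic QueryParser syntax, and the second argument carries
options (its handling is not shown in this hunk):

    MutableRoaringBitmap docIds = reader.getDocIds("k1:\"value-1\"", "");
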
diff --git a/pinot-core/src/test/java/org/apache/pinot/core/io/writer/impl/VarByteSingleColumnSingleValueReaderWriterTest.java b/pinot-core/src/test/java/org/apache/pinot/core/io/writer/impl/VarByteSingleColumnSingleValueReaderWriterTest.java
new file mode 100644
index 0000000..0fc33db
--- /dev/null
+++ b/pinot-core/src/test/java/org/apache/pinot/core/io/writer/impl/VarByteSingleColumnSingleValueReaderWriterTest.java
@@ -0,0 +1,31 @@
+package org.apache.pinot.core.io.writer.impl;
+
+import org.apache.pinot.core.io.readerwriter.PinotDataBufferMemoryManager;
+import org.apache.pinot.core.io.readerwriter.impl.VarByteSingleColumnSingleValueReaderWriter;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+
+public class VarByteSingleColumnSingleValueReaderWriterTest {
+
+  @Test
+  public void testSimple() {
+    VarByteSingleColumnSingleValueReaderWriter readerWriter;
+    PinotDataBufferMemoryManager mem = new DirectMemoryManager("test");
+    readerWriter = new VarByteSingleColumnSingleValueReaderWriter(100, -1, mem, "test");
+
+    for (int i = 0; i < 10000; i++) {
+      String data = "TEST-" + i;
+      readerWriter.setBytes(i, data.getBytes());
+    }
+    for (int i = 0; i < 10000; i++) {
+      byte[] data = readerWriter.getBytes(i);
+      Assert.assertEquals(new String(data), "TEST-" + i);
+    }
+  }
+}




[incubator-pinot] 01/06: Adding support for MATCHES Predicate

Posted by xi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xiangfu pushed a commit to branch nested-object-indexing-1
in repository https://gitbox.apache.org/repos/asf/incubator-pinot.git

commit a1d2d690221686a8073dfbadd015d5eecea0a6a4
Author: kishore gopalakrishna <g....@gmail.com>
AuthorDate: Fri Jan 18 16:05:45 2019 -0800

    Adding support for MATCHES Predicate
---
 .../antlr4/org/apache/pinot/pql/parsers/PQL2.g4    |   5 +
 .../apache/pinot/common/config/Deserializer.java   |   5 +-
 .../pinot/common/request/AggregationInfo.java      |  73 +++++------
 .../apache/pinot/common/request/BrokerRequest.java | 131 +++++++++----------
 .../pinot/common/request/FilterOperator.java       |  32 ++---
 .../apache/pinot/common/request/FilterQuery.java   |  87 ++++++-------
 .../pinot/common/request/FilterQueryMap.java       |  64 +++++----
 .../org/apache/pinot/common/request/GroupBy.java   | 137 +++++++++----------
 .../pinot/common/request/HavingFilterQuery.java    |  87 ++++++-------
 .../pinot/common/request/HavingFilterQueryMap.java |  62 ++++-----
 .../pinot/common/request/InstanceRequest.java      | 111 ++++++++--------
 .../apache/pinot/common/request/QuerySource.java   |  60 ++++-----
 .../org/apache/pinot/common/request/QueryType.java |  77 +++++------
 .../org/apache/pinot/common/request/Selection.java | 145 ++++++++++-----------
 .../apache/pinot/common/request/SelectionSort.java |  65 +++++----
 .../pinot/common/response/ProcessingException.java |  66 +++++-----
 .../apache/pinot/pql/parsers/Pql2AstListener.java  |  11 ++
 .../parsers/pql2/ast/MatchesPredicateAstNode.java  |  62 +++++++++
 .../org/apache/pinot/core/common/Predicate.java    |  35 ++++-
 .../core/common/predicate/MatchesPredicate.java    |  50 +++++++
 .../MatchesPredicateEvaluatorFactory.java          |  69 ++++++++++
 .../predicate/PredicateEvaluatorProvider.java      |   6 +-
 22 files changed, 787 insertions(+), 653 deletions(-)

diff --git a/pinot-common/src/main/antlr4/org/apache/pinot/pql/parsers/PQL2.g4 b/pinot-common/src/main/antlr4/org/apache/pinot/pql/parsers/PQL2.g4
index 5086182..17652ca 100644
--- a/pinot-common/src/main/antlr4/org/apache/pinot/pql/parsers/PQL2.g4
+++ b/pinot-common/src/main/antlr4/org/apache/pinot/pql/parsers/PQL2.g4
@@ -77,6 +77,7 @@ predicate:
   | betweenClause                         # BetweenPredicate
   | isClause                              # IsPredicate
   | regexpLikeClause                      # RegexpLikePredicate
+  | matchesClause                         # MatchesPredicate  
   ;
 
 inClause:
@@ -95,6 +96,9 @@ betweenClause:
 regexpLikeClause:
   REGEXP_LIKE '(' expression ',' literal ')';
 
+matchesClause:
+  expression MATCHES '(' literal ',' literal ')';
+
 booleanOperator: OR | AND;
 
 groupByClause: GROUP BY groupByList;
@@ -131,6 +135,7 @@ LIMIT: L I M I T;
 NOT : N O T;
 OR: O R;
 REGEXP_LIKE: R E G E X P '_' L I K E;
+MATCHES: M A T C H E S;
 ORDER: O R D E R;
 SELECT: S E L E C T;
 TOP: T O P;
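
With this grammar change, a MATCHES filter takes two literals: the Lucene query and an
options string. Mirroring the integration test added in this branch, a query might be
written as (illustrative only):

    String pql = "SELECT count(*) FROM myTable WHERE headers MATCHES('k1:\"value1\"', '')";
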
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/config/Deserializer.java b/pinot-common/src/main/java/org/apache/pinot/common/config/Deserializer.java
index 93c4d84..63646c4 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/config/Deserializer.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/config/Deserializer.java
@@ -404,8 +404,9 @@ public class Deserializer {
     config = config.resolve();
 
     try {
-      return deserialize(clazz, io.vavr.collection.HashSet.ofAll(config.entrySet())
-          .toMap(entry -> Tuple.of(entry.getKey(), entry.getValue().unwrapped())), "");
+      Map<String, Object> map = io.vavr.collection.HashSet.ofAll(config.entrySet())
+          .toMap(entry -> Tuple.of(entry.getKey(), entry.getValue().unwrapped()));
+      return deserialize(clazz, map, "");
     } catch (Exception e) {
       Utils.rethrowException(e);
       return null;
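
The Deserializer change above is a pure readability refactor: the vavr conversion is pulled into a named local before the recursive deserialize call, with no behavior change. In isolation the idiom looks roughly like this (a minimal sketch assuming io.vavr and Typesafe Config on the classpath; the class and method names are placeholders):

    import com.typesafe.config.Config;
    import io.vavr.Tuple;
    import io.vavr.collection.HashSet;
    import io.vavr.collection.Map;

    public class ConfigToMapSketch {
      // Converts a resolved Typesafe Config's entry set into an immutable
      // vavr Map, unwrapping each ConfigValue to a plain Java object,
      // exactly as the patched code does before calling deserialize.
      static Map<String, Object> toVavrMap(Config config) {
        return HashSet.ofAll(config.entrySet())
            .toMap(entry -> Tuple.of(entry.getKey(), entry.getValue().unwrapped()));
      }
    }

Naming the intermediate map makes the deserialize call site easier to read and to inspect in a debugger.
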
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/AggregationInfo.java b/pinot-common/src/main/java/org/apache/pinot/common/request/AggregationInfo.java
index aa0b3db..ab129ff 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/AggregationInfo.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/AggregationInfo.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,29 +6,40 @@
  */
 package org.apache.pinot.common.request;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Generated;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.protocol.TTupleProtocol;
 import org.apache.thrift.scheme.IScheme;
 import org.apache.thrift.scheme.SchemeFactory;
 import org.apache.thrift.scheme.StandardScheme;
+
 import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 /**
  * AUTO GENERATED: DO NOT EDIT
  *  Aggregation
- *
+ * 
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-8-24")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2019-1-18")
 public class AggregationInfo implements org.apache.thrift.TBase<AggregationInfo, AggregationInfo._Fields>, java.io.Serializable, Cloneable, Comparable<AggregationInfo> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AggregationInfo");
 
@@ -135,13 +128,13 @@ public class AggregationInfo implements org.apache.thrift.TBase<AggregationInfo,
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.AGGREGATION_TYPE, new org.apache.thrift.meta_data.FieldMetaData("aggregationType", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.AGGREGATION_TYPE, new org.apache.thrift.meta_data.FieldMetaData("aggregationType", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.AGGREGATION_PARAMS, new org.apache.thrift.meta_data.FieldMetaData("aggregationParams", org.apache.thrift.TFieldRequirementType.OPTIONAL,
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING),
+    tmpMap.put(_Fields.AGGREGATION_PARAMS, new org.apache.thrift.meta_data.FieldMetaData("aggregationParams", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
-    tmpMap.put(_Fields.IS_IN_SELECT_LIST, new org.apache.thrift.meta_data.FieldMetaData("isInSelectList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.IS_IN_SELECT_LIST, new org.apache.thrift.meta_data.FieldMetaData("isInSelectList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AggregationInfo.class, metaDataMap);
@@ -506,7 +499,7 @@ public class AggregationInfo implements org.apache.thrift.TBase<AggregationInfo,
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -514,7 +507,7 @@ public class AggregationInfo implements org.apache.thrift.TBase<AggregationInfo,
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.aggregationType = iprot.readString();
               struct.setAggregationTypeIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -534,7 +527,7 @@ public class AggregationInfo implements org.apache.thrift.TBase<AggregationInfo,
                 iprot.readMapEnd();
               }
               struct.setAggregationParamsIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -542,7 +535,7 @@ public class AggregationInfo implements org.apache.thrift.TBase<AggregationInfo,
             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
               struct.isInSelectList = iprot.readBool();
               struct.setIsInSelectListIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/BrokerRequest.java b/pinot-common/src/main/java/org/apache/pinot/common/request/BrokerRequest.java
index c95c782..3ff9ad1 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/BrokerRequest.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/BrokerRequest.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,29 +6,40 @@
  */
 package org.apache.pinot.common.request;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Generated;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.protocol.TTupleProtocol;
 import org.apache.thrift.scheme.IScheme;
 import org.apache.thrift.scheme.SchemeFactory;
 import org.apache.thrift.scheme.StandardScheme;
+
 import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 /**
  * AUTO GENERATED: DO NOT EDIT
  * Broker Query
- *
+ * 
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-8-24")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2019-1-18")
 public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, BrokerRequest._Fields>, java.io.Serializable, Cloneable, Comparable<BrokerRequest> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BrokerRequest");
 
@@ -200,42 +193,42 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.QUERY_TYPE, new org.apache.thrift.meta_data.FieldMetaData("queryType", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.QUERY_TYPE, new org.apache.thrift.meta_data.FieldMetaData("queryType", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, QueryType.class)));
-    tmpMap.put(_Fields.QUERY_SOURCE, new org.apache.thrift.meta_data.FieldMetaData("querySource", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.QUERY_SOURCE, new org.apache.thrift.meta_data.FieldMetaData("querySource", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, QuerySource.class)));
-    tmpMap.put(_Fields.TIME_INTERVAL, new org.apache.thrift.meta_data.FieldMetaData("timeInterval", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.TIME_INTERVAL, new org.apache.thrift.meta_data.FieldMetaData("timeInterval", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.DURATION, new org.apache.thrift.meta_data.FieldMetaData("duration", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.DURATION, new org.apache.thrift.meta_data.FieldMetaData("duration", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.FILTER_QUERY, new org.apache.thrift.meta_data.FieldMetaData("filterQuery", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.FILTER_QUERY, new org.apache.thrift.meta_data.FieldMetaData("filterQuery", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FilterQuery.class)));
-    tmpMap.put(_Fields.AGGREGATIONS_INFO, new org.apache.thrift.meta_data.FieldMetaData("aggregationsInfo", org.apache.thrift.TFieldRequirementType.OPTIONAL,
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+    tmpMap.put(_Fields.AGGREGATIONS_INFO, new org.apache.thrift.meta_data.FieldMetaData("aggregationsInfo", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AggregationInfo.class))));
-    tmpMap.put(_Fields.GROUP_BY, new org.apache.thrift.meta_data.FieldMetaData("groupBy", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.GROUP_BY, new org.apache.thrift.meta_data.FieldMetaData("groupBy", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GroupBy.class)));
-    tmpMap.put(_Fields.SELECTIONS, new org.apache.thrift.meta_data.FieldMetaData("selections", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.SELECTIONS, new org.apache.thrift.meta_data.FieldMetaData("selections", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Selection.class)));
-    tmpMap.put(_Fields.FILTER_SUB_QUERY_MAP, new org.apache.thrift.meta_data.FieldMetaData("filterSubQueryMap", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.FILTER_SUB_QUERY_MAP, new org.apache.thrift.meta_data.FieldMetaData("filterSubQueryMap", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FilterQueryMap.class)));
-    tmpMap.put(_Fields.BUCKET_HASH_KEY, new org.apache.thrift.meta_data.FieldMetaData("bucketHashKey", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.BUCKET_HASH_KEY, new org.apache.thrift.meta_data.FieldMetaData("bucketHashKey", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.ENABLE_TRACE, new org.apache.thrift.meta_data.FieldMetaData("enableTrace", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.ENABLE_TRACE, new org.apache.thrift.meta_data.FieldMetaData("enableTrace", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
-    tmpMap.put(_Fields.RESPONSE_FORMAT, new org.apache.thrift.meta_data.FieldMetaData("responseFormat", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.RESPONSE_FORMAT, new org.apache.thrift.meta_data.FieldMetaData("responseFormat", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.DEBUG_OPTIONS, new org.apache.thrift.meta_data.FieldMetaData("debugOptions", org.apache.thrift.TFieldRequirementType.OPTIONAL,
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING),
+    tmpMap.put(_Fields.DEBUG_OPTIONS, new org.apache.thrift.meta_data.FieldMetaData("debugOptions", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
-    tmpMap.put(_Fields.QUERY_OPTIONS, new org.apache.thrift.meta_data.FieldMetaData("queryOptions", org.apache.thrift.TFieldRequirementType.OPTIONAL,
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING),
+    tmpMap.put(_Fields.QUERY_OPTIONS, new org.apache.thrift.meta_data.FieldMetaData("queryOptions", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
-    tmpMap.put(_Fields.HAVING_FILTER_QUERY, new org.apache.thrift.meta_data.FieldMetaData("havingFilterQuery", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.HAVING_FILTER_QUERY, new org.apache.thrift.meta_data.FieldMetaData("havingFilterQuery", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HavingFilterQuery.class)));
-    tmpMap.put(_Fields.HAVING_FILTER_SUB_QUERY_MAP, new org.apache.thrift.meta_data.FieldMetaData("havingFilterSubQueryMap", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.HAVING_FILTER_SUB_QUERY_MAP, new org.apache.thrift.meta_data.FieldMetaData("havingFilterSubQueryMap", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HavingFilterQueryMap.class)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BrokerRequest.class, metaDataMap);
@@ -1617,7 +1610,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -1626,7 +1619,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
               struct.queryType = new QueryType();
               struct.queryType.read(iprot);
               struct.setQueryTypeIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1635,7 +1628,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
               struct.querySource = new QuerySource();
               struct.querySource.read(iprot);
               struct.setQuerySourceIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1643,7 +1636,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.timeInterval = iprot.readString();
               struct.setTimeIntervalIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1651,7 +1644,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.duration = iprot.readString();
               struct.setDurationIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1660,7 +1653,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
               struct.filterQuery = new FilterQuery();
               struct.filterQuery.read(iprot);
               struct.setFilterQueryIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1679,7 +1672,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
                 iprot.readListEnd();
               }
               struct.setAggregationsInfoIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1688,7 +1681,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
               struct.groupBy = new GroupBy();
               struct.groupBy.read(iprot);
               struct.setGroupByIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1697,7 +1690,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
               struct.selections = new Selection();
               struct.selections.read(iprot);
               struct.setSelectionsIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1706,7 +1699,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
               struct.filterSubQueryMap = new FilterQueryMap();
               struct.filterSubQueryMap.read(iprot);
               struct.setFilterSubQueryMapIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1714,7 +1707,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.bucketHashKey = iprot.readString();
               struct.setBucketHashKeyIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1722,7 +1715,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
               struct.enableTrace = iprot.readBool();
               struct.setEnableTraceIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1730,7 +1723,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.responseFormat = iprot.readString();
               struct.setResponseFormatIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1750,7 +1743,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
                 iprot.readMapEnd();
               }
               struct.setDebugOptionsIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1770,7 +1763,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
                 iprot.readMapEnd();
               }
               struct.setQueryOptionsIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1779,7 +1772,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
               struct.havingFilterQuery = new HavingFilterQuery();
               struct.havingFilterQuery.read(iprot);
               struct.setHavingFilterQueryIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -1788,7 +1781,7 @@ public class BrokerRequest implements org.apache.thrift.TBase<BrokerRequest, Bro
               struct.havingFilterSubQueryMap = new HavingFilterQueryMap();
               struct.havingFilterSubQueryMap.read(iprot);
               struct.setHavingFilterSubQueryMapIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/FilterOperator.java b/pinot-common/src/main/java/org/apache/pinot/common/request/FilterOperator.java
index 797aad5..a2f4351 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/FilterOperator.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/FilterOperator.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,10 +6,15 @@
  */
 package org.apache.pinot.common.request;
 
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
 /**
  * AUTO GENERATED: DO NOT EDIT
  * Filter Operator
- *
+ * 
  */
 public enum FilterOperator implements org.apache.thrift.TEnum {
   AND(0),
@@ -37,7 +24,8 @@ public enum FilterOperator implements org.apache.thrift.TEnum {
   RANGE(4),
   REGEXP_LIKE(5),
   NOT_IN(6),
-  IN(7);
+  IN(7),
+  TEXT_MATCH(8);
 
   private final int value;
 
@@ -56,7 +44,7 @@ public enum FilterOperator implements org.apache.thrift.TEnum {
    * Find a the enum type by its integer value, as defined in the Thrift IDL.
    * @return null if the value is not found.
    */
-  public static FilterOperator findByValue(int value) {
+  public static FilterOperator findByValue(int value) { 
     switch (value) {
       case 0:
         return AND;
@@ -74,6 +62,8 @@ public enum FilterOperator implements org.apache.thrift.TEnum {
         return NOT_IN;
       case 7:
         return IN;
+      case 8:
+        return TEXT_MATCH;
       default:
         return null;
     }
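
Two details in the FilterOperator change are worth noting. First, the PQL keyword added to the grammar is MATCHES while the Thrift enum names the operator TEXT_MATCH(8); the translation between the two presumably lives in MatchesPredicateAstNode (listed in the diffstat but not shown in this hunk). Second, findByValue returns null for ordinals the enum does not know, so a receiver deserializing a request that carries the new ordinal without this patch applied needs a guard. A hypothetical caller-side check, not part of this patch:

    import org.apache.pinot.common.request.FilterOperator;

    public class FilterOperatorLookupSketch {
      public static void main(String[] args) {
        // findByValue is the generated lookup shown above; it returns null
        // rather than throwing when the ordinal is unknown.
        FilterOperator op = FilterOperator.findByValue(8);
        if (op == null) {
          throw new IllegalArgumentException("Unknown FilterOperator ordinal: 8");
        }
        // With this patch applied, ordinal 8 resolves to TEXT_MATCH.
        System.out.println(op);
      }
    }
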
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQuery.java b/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQuery.java
index 54a8141..3e6b8fe 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQuery.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQuery.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,29 +6,40 @@
  */
 package org.apache.pinot.common.request;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Generated;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.protocol.TTupleProtocol;
 import org.apache.thrift.scheme.IScheme;
 import org.apache.thrift.scheme.SchemeFactory;
 import org.apache.thrift.scheme.StandardScheme;
+
 import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 /**
  * AUTO GENERATED: DO NOT EDIT
  * Filter query
- *
+ * 
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-5-24")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2019-1-18")
 public class FilterQuery implements org.apache.thrift.TBase<FilterQuery, FilterQuery._Fields>, java.io.Serializable, Cloneable, Comparable<FilterQuery> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FilterQuery");
 
@@ -77,7 +70,7 @@ public class FilterQuery implements org.apache.thrift.TBase<FilterQuery, FilterQ
     COLUMN((short)2, "column"),
     VALUE((short)3, "value"),
     /**
-     *
+     * 
      * @see FilterOperator
      */
     OPERATOR((short)4, "operator"),
@@ -152,17 +145,17 @@ public class FilterQuery implements org.apache.thrift.TBase<FilterQuery, FilterQ
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.REQUIRED,
+    tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT,
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
-    tmpMap.put(_Fields.OPERATOR, new org.apache.thrift.meta_data.FieldMetaData("operator", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.OPERATOR, new org.apache.thrift.meta_data.FieldMetaData("operator", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, FilterOperator.class)));
-    tmpMap.put(_Fields.NESTED_FILTER_QUERY_IDS, new org.apache.thrift.meta_data.FieldMetaData("nestedFilterQueryIds", org.apache.thrift.TFieldRequirementType.DEFAULT,
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+    tmpMap.put(_Fields.NESTED_FILTER_QUERY_IDS, new org.apache.thrift.meta_data.FieldMetaData("nestedFilterQueryIds", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FilterQuery.class, metaDataMap);
@@ -309,7 +302,7 @@ public class FilterQuery implements org.apache.thrift.TBase<FilterQuery, FilterQ
   }
 
   /**
-   *
+   * 
    * @see FilterOperator
    */
   public FilterOperator getOperator() {
@@ -317,7 +310,7 @@ public class FilterQuery implements org.apache.thrift.TBase<FilterQuery, FilterQ
   }
 
   /**
-   *
+   * 
    * @see FilterOperator
    */
   public void setOperator(FilterOperator operator) {
@@ -719,7 +712,7 @@ public class FilterQuery implements org.apache.thrift.TBase<FilterQuery, FilterQ
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -727,7 +720,7 @@ public class FilterQuery implements org.apache.thrift.TBase<FilterQuery, FilterQ
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
               struct.id = iprot.readI32();
               struct.setIdIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -735,7 +728,7 @@ public class FilterQuery implements org.apache.thrift.TBase<FilterQuery, FilterQ
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.column = iprot.readString();
               struct.setColumnIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -753,7 +746,7 @@ public class FilterQuery implements org.apache.thrift.TBase<FilterQuery, FilterQ
                 iprot.readListEnd();
               }
               struct.setValueIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -761,7 +754,7 @@ public class FilterQuery implements org.apache.thrift.TBase<FilterQuery, FilterQ
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
               struct.operator = org.apache.pinot.common.request.FilterOperator.findByValue(iprot.readI32());
               struct.setOperatorIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -779,7 +772,7 @@ public class FilterQuery implements org.apache.thrift.TBase<FilterQuery, FilterQ
                 iprot.readListEnd();
               }
               struct.setNestedFilterQueryIdsIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQueryMap.java b/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQueryMap.java
index 8754c80..2a15e98 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQueryMap.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQueryMap.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,28 +6,40 @@
  */
 package org.apache.pinot.common.request;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Generated;
-import org.apache.thrift.protocol.TTupleProtocol;
 import org.apache.thrift.scheme.IScheme;
 import org.apache.thrift.scheme.SchemeFactory;
 import org.apache.thrift.scheme.StandardScheme;
+
 import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 /**
  * AUTO GENERATED: DO NOT EDIT
  * Filter Query is nested but thrift stable version does not support yet (The support is there in top of the trunk but no released jars. Two concerns : stability and onus of maintaining a stable point. Also, its pretty difficult to compile thrift in Linkedin software development environment which is not geared towards c++ dev. Hence, the )
- *
+ * 
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-5-24")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2019-1-18")
 public class FilterQueryMap implements org.apache.thrift.TBase<FilterQueryMap, FilterQueryMap._Fields>, java.io.Serializable, Cloneable, Comparable<FilterQueryMap> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FilterQueryMap");
 
@@ -122,9 +116,9 @@ public class FilterQueryMap implements org.apache.thrift.TBase<FilterQueryMap, F
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.FILTER_QUERY_MAP, new org.apache.thrift.meta_data.FieldMetaData("filterQueryMap", org.apache.thrift.TFieldRequirementType.OPTIONAL,
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32),
+    tmpMap.put(_Fields.FILTER_QUERY_MAP, new org.apache.thrift.meta_data.FieldMetaData("filterQueryMap", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32), 
             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FilterQuery.class))));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FilterQueryMap.class, metaDataMap);
@@ -355,7 +349,7 @@ public class FilterQueryMap implements org.apache.thrift.TBase<FilterQueryMap, F
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -376,7 +370,7 @@ public class FilterQueryMap implements org.apache.thrift.TBase<FilterQueryMap, F
                 iprot.readMapEnd();
               }
               struct.setFilterQueryMapIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/GroupBy.java b/pinot-common/src/main/java/org/apache/pinot/common/request/GroupBy.java
index 134eb6b..6381642 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/GroupBy.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/GroupBy.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,29 +6,40 @@
  */
 package org.apache.pinot.common.request;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Generated;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.protocol.TTupleProtocol;
 import org.apache.thrift.scheme.IScheme;
 import org.apache.thrift.scheme.SchemeFactory;
 import org.apache.thrift.scheme.StandardScheme;
+
 import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 /**
  * AUTO GENERATED: DO NOT EDIT
  * GroupBy
- *
+ * 
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-5-24")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2019-1-18")
 public class GroupBy implements org.apache.thrift.TBase<GroupBy, GroupBy._Fields>, java.io.Serializable, Cloneable, Comparable<GroupBy> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GroupBy");
 
@@ -135,13 +128,13 @@ public class GroupBy implements org.apache.thrift.TBase<GroupBy, GroupBy._Fields
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.OPTIONAL,
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+    tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
-    tmpMap.put(_Fields.TOP_N, new org.apache.thrift.meta_data.FieldMetaData("topN", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.TOP_N, new org.apache.thrift.meta_data.FieldMetaData("topN", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.EXPRESSIONS, new org.apache.thrift.meta_data.FieldMetaData("expressions", org.apache.thrift.TFieldRequirementType.OPTIONAL,
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+    tmpMap.put(_Fields.EXPRESSIONS, new org.apache.thrift.meta_data.FieldMetaData("expressions", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GroupBy.class, metaDataMap);
@@ -526,25 +519,25 @@ public class GroupBy implements org.apache.thrift.TBase<GroupBy, GroupBy._Fields
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
           case 1: // COLUMNS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list36 = iprot.readListBegin();
-                struct.columns = new ArrayList<String>(_list36.size);
-                String _elem37;
-                for (int _i38 = 0; _i38 < _list36.size; ++_i38)
+                org.apache.thrift.protocol.TList _list62 = iprot.readListBegin();
+                struct.columns = new ArrayList<String>(_list62.size);
+                String _elem63;
+                for (int _i64 = 0; _i64 < _list62.size; ++_i64)
                 {
-                  _elem37 = iprot.readString();
-                  struct.columns.add(_elem37);
+                  _elem63 = iprot.readString();
+                  struct.columns.add(_elem63);
                 }
                 iprot.readListEnd();
               }
               struct.setColumnsIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -552,25 +545,25 @@ public class GroupBy implements org.apache.thrift.TBase<GroupBy, GroupBy._Fields
             if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
               struct.topN = iprot.readI64();
               struct.setTopNIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 3: // EXPRESSIONS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list39 = iprot.readListBegin();
-                struct.expressions = new ArrayList<String>(_list39.size);
-                String _elem40;
-                for (int _i41 = 0; _i41 < _list39.size; ++_i41)
+                org.apache.thrift.protocol.TList _list65 = iprot.readListBegin();
+                struct.expressions = new ArrayList<String>(_list65.size);
+                String _elem66;
+                for (int _i67 = 0; _i67 < _list65.size; ++_i67)
                 {
-                  _elem40 = iprot.readString();
-                  struct.expressions.add(_elem40);
+                  _elem66 = iprot.readString();
+                  struct.expressions.add(_elem66);
                 }
                 iprot.readListEnd();
               }
               struct.setExpressionsIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -592,9 +585,9 @@ public class GroupBy implements org.apache.thrift.TBase<GroupBy, GroupBy._Fields
           oprot.writeFieldBegin(COLUMNS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.columns.size()));
-            for (String _iter42 : struct.columns)
+            for (String _iter68 : struct.columns)
             {
-              oprot.writeString(_iter42);
+              oprot.writeString(_iter68);
             }
             oprot.writeListEnd();
           }
@@ -611,9 +604,9 @@ public class GroupBy implements org.apache.thrift.TBase<GroupBy, GroupBy._Fields
           oprot.writeFieldBegin(EXPRESSIONS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.expressions.size()));
-            for (String _iter43 : struct.expressions)
+            for (String _iter69 : struct.expressions)
             {
-              oprot.writeString(_iter43);
+              oprot.writeString(_iter69);
             }
             oprot.writeListEnd();
           }
@@ -651,9 +644,9 @@ public class GroupBy implements org.apache.thrift.TBase<GroupBy, GroupBy._Fields
       if (struct.isSetColumns()) {
         {
           oprot.writeI32(struct.columns.size());
-          for (String _iter44 : struct.columns)
+          for (String _iter70 : struct.columns)
           {
-            oprot.writeString(_iter44);
+            oprot.writeString(_iter70);
           }
         }
       }
@@ -663,9 +656,9 @@ public class GroupBy implements org.apache.thrift.TBase<GroupBy, GroupBy._Fields
       if (struct.isSetExpressions()) {
         {
           oprot.writeI32(struct.expressions.size());
-          for (String _iter45 : struct.expressions)
+          for (String _iter71 : struct.expressions)
           {
-            oprot.writeString(_iter45);
+            oprot.writeString(_iter71);
           }
         }
       }
@@ -677,13 +670,13 @@ public class GroupBy implements org.apache.thrift.TBase<GroupBy, GroupBy._Fields
       BitSet incoming = iprot.readBitSet(3);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list46 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.columns = new ArrayList<String>(_list46.size);
-          String _elem47;
-          for (int _i48 = 0; _i48 < _list46.size; ++_i48)
+          org.apache.thrift.protocol.TList _list72 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.columns = new ArrayList<String>(_list72.size);
+          String _elem73;
+          for (int _i74 = 0; _i74 < _list72.size; ++_i74)
           {
-            _elem47 = iprot.readString();
-            struct.columns.add(_elem47);
+            _elem73 = iprot.readString();
+            struct.columns.add(_elem73);
           }
         }
         struct.setColumnsIsSet(true);
@@ -694,13 +687,13 @@ public class GroupBy implements org.apache.thrift.TBase<GroupBy, GroupBy._Fields
       }
       if (incoming.get(2)) {
         {
-          org.apache.thrift.protocol.TList _list49 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.expressions = new ArrayList<String>(_list49.size);
-          String _elem50;
-          for (int _i51 = 0; _i51 < _list49.size; ++_i51)
+          org.apache.thrift.protocol.TList _list75 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.expressions = new ArrayList<String>(_list75.size);
+          String _elem76;
+          for (int _i77 = 0; _i77 < _list75.size; ++_i77)
           {
-            _elem50 = iprot.readString();
-            struct.expressions.add(_elem50);
+            _elem76 = iprot.readString();
+            struct.expressions.add(_elem76);
           }
         }
         struct.setExpressionsIsSet(true);
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQuery.java b/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQuery.java
index 3ab0de9..d50e055 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQuery.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQuery.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,29 +6,40 @@
  */
 package org.apache.pinot.common.request;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Generated;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.protocol.TTupleProtocol;
 import org.apache.thrift.scheme.IScheme;
 import org.apache.thrift.scheme.SchemeFactory;
 import org.apache.thrift.scheme.StandardScheme;
+
 import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 /**
  * AUTO GENERATED: DO NOT EDIT
  * Having Filter query
- *
+ * 
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-8-24")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2019-1-18")
 public class HavingFilterQuery implements org.apache.thrift.TBase<HavingFilterQuery, HavingFilterQuery._Fields>, java.io.Serializable, Cloneable, Comparable<HavingFilterQuery> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HavingFilterQuery");
 
@@ -77,7 +70,7 @@ public class HavingFilterQuery implements org.apache.thrift.TBase<HavingFilterQu
     AGGREGATION_INFO((short)2, "aggregationInfo"),
     VALUE((short)3, "value"),
     /**
-     *
+     * 
      * @see FilterOperator
      */
     OPERATOR((short)4, "operator"),
@@ -152,17 +145,17 @@ public class HavingFilterQuery implements org.apache.thrift.TBase<HavingFilterQu
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.REQUIRED,
+    tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.AGGREGATION_INFO, new org.apache.thrift.meta_data.FieldMetaData("aggregationInfo", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.AGGREGATION_INFO, new org.apache.thrift.meta_data.FieldMetaData("aggregationInfo", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT        , "AggregationInfo")));
-    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT,
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+    tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
-    tmpMap.put(_Fields.OPERATOR, new org.apache.thrift.meta_data.FieldMetaData("operator", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.OPERATOR, new org.apache.thrift.meta_data.FieldMetaData("operator", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, FilterOperator.class)));
-    tmpMap.put(_Fields.NESTED_FILTER_QUERY_IDS, new org.apache.thrift.meta_data.FieldMetaData("nestedFilterQueryIds", org.apache.thrift.TFieldRequirementType.DEFAULT,
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+    tmpMap.put(_Fields.NESTED_FILTER_QUERY_IDS, new org.apache.thrift.meta_data.FieldMetaData("nestedFilterQueryIds", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HavingFilterQuery.class, metaDataMap);
@@ -309,7 +302,7 @@ public class HavingFilterQuery implements org.apache.thrift.TBase<HavingFilterQu
   }
 
   /**
-   *
+   * 
    * @see FilterOperator
    */
   public FilterOperator getOperator() {
@@ -317,7 +310,7 @@ public class HavingFilterQuery implements org.apache.thrift.TBase<HavingFilterQu
   }
 
   /**
-   *
+   * 
    * @see FilterOperator
    */
   public void setOperator(FilterOperator operator) {
@@ -719,7 +712,7 @@ public class HavingFilterQuery implements org.apache.thrift.TBase<HavingFilterQu
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -727,7 +720,7 @@ public class HavingFilterQuery implements org.apache.thrift.TBase<HavingFilterQu
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
               struct.id = iprot.readI32();
               struct.setIdIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -736,7 +729,7 @@ public class HavingFilterQuery implements org.apache.thrift.TBase<HavingFilterQu
               struct.aggregationInfo = new AggregationInfo();
               struct.aggregationInfo.read(iprot);
               struct.setAggregationInfoIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -754,7 +747,7 @@ public class HavingFilterQuery implements org.apache.thrift.TBase<HavingFilterQu
                 iprot.readListEnd();
               }
               struct.setValueIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -762,7 +755,7 @@ public class HavingFilterQuery implements org.apache.thrift.TBase<HavingFilterQu
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
               struct.operator = org.apache.pinot.common.request.FilterOperator.findByValue(iprot.readI32());
               struct.setOperatorIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -780,7 +773,7 @@ public class HavingFilterQuery implements org.apache.thrift.TBase<HavingFilterQu
                 iprot.readListEnd();
               }
               struct.setNestedFilterQueryIdsIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQueryMap.java b/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQueryMap.java
index bcc0c59..0d3620a 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQueryMap.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQueryMap.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,23 +6,35 @@
  */
 package org.apache.pinot.common.request;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Generated;
-import org.apache.thrift.protocol.TTupleProtocol;
 import org.apache.thrift.scheme.IScheme;
 import org.apache.thrift.scheme.SchemeFactory;
 import org.apache.thrift.scheme.StandardScheme;
+
 import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-8-24")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2019-1-18")
 public class HavingFilterQueryMap implements org.apache.thrift.TBase<HavingFilterQueryMap, HavingFilterQueryMap._Fields>, java.io.Serializable, Cloneable, Comparable<HavingFilterQueryMap> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HavingFilterQueryMap");
 
@@ -117,9 +111,9 @@ public class HavingFilterQueryMap implements org.apache.thrift.TBase<HavingFilte
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.FILTER_QUERY_MAP, new org.apache.thrift.meta_data.FieldMetaData("filterQueryMap", org.apache.thrift.TFieldRequirementType.OPTIONAL,
-        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP,
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32),
+    tmpMap.put(_Fields.FILTER_QUERY_MAP, new org.apache.thrift.meta_data.FieldMetaData("filterQueryMap", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32), 
             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HavingFilterQuery.class))));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HavingFilterQueryMap.class, metaDataMap);
@@ -350,7 +344,7 @@ public class HavingFilterQueryMap implements org.apache.thrift.TBase<HavingFilte
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -371,7 +365,7 @@ public class HavingFilterQueryMap implements org.apache.thrift.TBase<HavingFilte
                 iprot.readMapEnd();
               }
               struct.setFilterQueryMapIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/InstanceRequest.java b/pinot-common/src/main/java/org/apache/pinot/common/request/InstanceRequest.java
index b0fd5fa..1bd3d19 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/InstanceRequest.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/InstanceRequest.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,29 +6,40 @@
  */
 package org.apache.pinot.common.request;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Generated;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.protocol.TTupleProtocol;
 import org.apache.thrift.scheme.IScheme;
 import org.apache.thrift.scheme.SchemeFactory;
 import org.apache.thrift.scheme.StandardScheme;
+
 import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 /**
  * AUTO GENERATED: DO NOT EDIT
  * Instance Request
- *
+ * 
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-5-24")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2019-1-18")
 public class InstanceRequest implements org.apache.thrift.TBase<InstanceRequest, InstanceRequest._Fields>, java.io.Serializable, Cloneable, Comparable<InstanceRequest> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InstanceRequest");
 
@@ -146,16 +139,16 @@ public class InstanceRequest implements org.apache.thrift.TBase<InstanceRequest,
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.REQUEST_ID, new org.apache.thrift.meta_data.FieldMetaData("requestId", org.apache.thrift.TFieldRequirementType.REQUIRED,
+    tmpMap.put(_Fields.REQUEST_ID, new org.apache.thrift.meta_data.FieldMetaData("requestId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.QUERY, new org.apache.thrift.meta_data.FieldMetaData("query", org.apache.thrift.TFieldRequirementType.REQUIRED,
+    tmpMap.put(_Fields.QUERY, new org.apache.thrift.meta_data.FieldMetaData("query", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, BrokerRequest.class)));
-    tmpMap.put(_Fields.SEARCH_SEGMENTS, new org.apache.thrift.meta_data.FieldMetaData("searchSegments", org.apache.thrift.TFieldRequirementType.OPTIONAL,
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+    tmpMap.put(_Fields.SEARCH_SEGMENTS, new org.apache.thrift.meta_data.FieldMetaData("searchSegments", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
-    tmpMap.put(_Fields.ENABLE_TRACE, new org.apache.thrift.meta_data.FieldMetaData("enableTrace", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.ENABLE_TRACE, new org.apache.thrift.meta_data.FieldMetaData("enableTrace", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
-    tmpMap.put(_Fields.BROKER_ID, new org.apache.thrift.meta_data.FieldMetaData("brokerId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.BROKER_ID, new org.apache.thrift.meta_data.FieldMetaData("brokerId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(InstanceRequest.class, metaDataMap);
@@ -683,7 +676,7 @@ public class InstanceRequest implements org.apache.thrift.TBase<InstanceRequest,
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -691,7 +684,7 @@ public class InstanceRequest implements org.apache.thrift.TBase<InstanceRequest,
             if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
               struct.requestId = iprot.readI64();
               struct.setRequestIdIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -700,25 +693,25 @@ public class InstanceRequest implements org.apache.thrift.TBase<InstanceRequest,
               struct.query = new BrokerRequest();
               struct.query.read(iprot);
               struct.setQueryIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 3: // SEARCH_SEGMENTS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list96 = iprot.readListBegin();
-                struct.searchSegments = new ArrayList<String>(_list96.size);
-                String _elem97;
-                for (int _i98 = 0; _i98 < _list96.size; ++_i98)
+                org.apache.thrift.protocol.TList _list122 = iprot.readListBegin();
+                struct.searchSegments = new ArrayList<String>(_list122.size);
+                String _elem123;
+                for (int _i124 = 0; _i124 < _list122.size; ++_i124)
                 {
-                  _elem97 = iprot.readString();
-                  struct.searchSegments.add(_elem97);
+                  _elem123 = iprot.readString();
+                  struct.searchSegments.add(_elem123);
                 }
                 iprot.readListEnd();
               }
               struct.setSearchSegmentsIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -726,7 +719,7 @@ public class InstanceRequest implements org.apache.thrift.TBase<InstanceRequest,
             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
               struct.enableTrace = iprot.readBool();
               struct.setEnableTraceIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -734,7 +727,7 @@ public class InstanceRequest implements org.apache.thrift.TBase<InstanceRequest,
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.brokerId = iprot.readString();
               struct.setBrokerIdIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -764,9 +757,9 @@ public class InstanceRequest implements org.apache.thrift.TBase<InstanceRequest,
           oprot.writeFieldBegin(SEARCH_SEGMENTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.searchSegments.size()));
-            for (String _iter99 : struct.searchSegments)
+            for (String _iter125 : struct.searchSegments)
             {
-              oprot.writeString(_iter99);
+              oprot.writeString(_iter125);
             }
             oprot.writeListEnd();
           }
@@ -818,9 +811,9 @@ public class InstanceRequest implements org.apache.thrift.TBase<InstanceRequest,
       if (struct.isSetSearchSegments()) {
         {
           oprot.writeI32(struct.searchSegments.size());
-          for (String _iter100 : struct.searchSegments)
+          for (String _iter126 : struct.searchSegments)
           {
-            oprot.writeString(_iter100);
+            oprot.writeString(_iter126);
           }
         }
       }
@@ -843,13 +836,13 @@ public class InstanceRequest implements org.apache.thrift.TBase<InstanceRequest,
       BitSet incoming = iprot.readBitSet(3);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list101 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.searchSegments = new ArrayList<String>(_list101.size);
-          String _elem102;
-          for (int _i103 = 0; _i103 < _list101.size; ++_i103)
+          org.apache.thrift.protocol.TList _list127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.searchSegments = new ArrayList<String>(_list127.size);
+          String _elem128;
+          for (int _i129 = 0; _i129 < _list127.size; ++_i129)
           {
-            _elem102 = iprot.readString();
-            struct.searchSegments.add(_elem102);
+            _elem128 = iprot.readString();
+            struct.searchSegments.add(_elem128);
           }
         }
         struct.setSearchSegmentsIsSet(true);
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/QuerySource.java b/pinot-common/src/main/java/org/apache/pinot/common/request/QuerySource.java
index b4d4e3b..9c63591 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/QuerySource.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/QuerySource.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,28 +6,40 @@
  */
 package org.apache.pinot.common.request;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Generated;
-import org.apache.thrift.protocol.TTupleProtocol;
 import org.apache.thrift.scheme.IScheme;
 import org.apache.thrift.scheme.SchemeFactory;
 import org.apache.thrift.scheme.StandardScheme;
+
 import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 /**
  * AUTO GENERATED: DO NOT EDIT
  * Query source
- *
+ * 
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-5-24")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2019-1-18")
 public class QuerySource implements org.apache.thrift.TBase<QuerySource, QuerySource._Fields>, java.io.Serializable, Cloneable, Comparable<QuerySource> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("QuerySource");
 
@@ -122,7 +116,7 @@ public class QuerySource implements org.apache.thrift.TBase<QuerySource, QuerySo
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(QuerySource.class, metaDataMap);
@@ -330,7 +324,7 @@ public class QuerySource implements org.apache.thrift.TBase<QuerySource, QuerySo
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -338,7 +332,7 @@ public class QuerySource implements org.apache.thrift.TBase<QuerySource, QuerySo
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.tableName = iprot.readString();
               struct.setTableNameIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/QueryType.java b/pinot-common/src/main/java/org/apache/pinot/common/request/QueryType.java
index d0b1208..a49112d 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/QueryType.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/QueryType.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,29 +6,40 @@
  */
 package org.apache.pinot.common.request;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Generated;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.protocol.TTupleProtocol;
 import org.apache.thrift.scheme.IScheme;
 import org.apache.thrift.scheme.SchemeFactory;
 import org.apache.thrift.scheme.StandardScheme;
+
 import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 /**
  * AUTO GENERATED: DO NOT EDIT
  *  Query type
- *
+ * 
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-8-24")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2019-1-18")
 public class QueryType implements org.apache.thrift.TBase<QueryType, QueryType._Fields>, java.io.Serializable, Cloneable, Comparable<QueryType> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("QueryType");
 
@@ -149,15 +142,15 @@ public class QueryType implements org.apache.thrift.TBase<QueryType, QueryType._
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.HAS_SELECTION, new org.apache.thrift.meta_data.FieldMetaData("hasSelection", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.HAS_SELECTION, new org.apache.thrift.meta_data.FieldMetaData("hasSelection", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
-    tmpMap.put(_Fields.HAS_FILTER, new org.apache.thrift.meta_data.FieldMetaData("hasFilter", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.HAS_FILTER, new org.apache.thrift.meta_data.FieldMetaData("hasFilter", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
-    tmpMap.put(_Fields.HAS_AGGREGATION, new org.apache.thrift.meta_data.FieldMetaData("hasAggregation", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.HAS_AGGREGATION, new org.apache.thrift.meta_data.FieldMetaData("hasAggregation", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
-    tmpMap.put(_Fields.HAS_GROUP_BY, new org.apache.thrift.meta_data.FieldMetaData("hasGroup_by", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.HAS_GROUP_BY, new org.apache.thrift.meta_data.FieldMetaData("hasGroup_by", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
-    tmpMap.put(_Fields.HAS_HAVING, new org.apache.thrift.meta_data.FieldMetaData("hasHaving", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.HAS_HAVING, new org.apache.thrift.meta_data.FieldMetaData("hasHaving", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(QueryType.class, metaDataMap);
@@ -634,7 +627,7 @@ public class QueryType implements org.apache.thrift.TBase<QueryType, QueryType._
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -642,7 +635,7 @@ public class QueryType implements org.apache.thrift.TBase<QueryType, QueryType._
             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
               struct.hasSelection = iprot.readBool();
               struct.setHasSelectionIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -650,7 +643,7 @@ public class QueryType implements org.apache.thrift.TBase<QueryType, QueryType._
             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
               struct.hasFilter = iprot.readBool();
               struct.setHasFilterIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -658,7 +651,7 @@ public class QueryType implements org.apache.thrift.TBase<QueryType, QueryType._
             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
               struct.hasAggregation = iprot.readBool();
               struct.setHasAggregationIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -666,7 +659,7 @@ public class QueryType implements org.apache.thrift.TBase<QueryType, QueryType._
             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
               struct.hasGroup_by = iprot.readBool();
               struct.setHasGroup_byIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -674,7 +667,7 @@ public class QueryType implements org.apache.thrift.TBase<QueryType, QueryType._
             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
               struct.hasHaving = iprot.readBool();
               struct.setHasHavingIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/Selection.java b/pinot-common/src/main/java/org/apache/pinot/common/request/Selection.java
index 92a2b2c..b037a0b 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/Selection.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/Selection.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,29 +6,40 @@
  */
 package org.apache.pinot.common.request;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Generated;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.protocol.TTupleProtocol;
 import org.apache.thrift.scheme.IScheme;
 import org.apache.thrift.scheme.SchemeFactory;
 import org.apache.thrift.scheme.StandardScheme;
+
 import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 /**
  * AUTO GENERATED: DO NOT EDIT
  * Selection
- *
+ * 
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-5-24")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2019-1-18")
 public class Selection implements org.apache.thrift.TBase<Selection, Selection._Fields>, java.io.Serializable, Cloneable, Comparable<Selection> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Selection");
 
@@ -141,15 +134,15 @@ public class Selection implements org.apache.thrift.TBase<Selection, Selection._
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.SELECTION_COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("selectionColumns", org.apache.thrift.TFieldRequirementType.OPTIONAL,
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+    tmpMap.put(_Fields.SELECTION_COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("selectionColumns", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
-    tmpMap.put(_Fields.SELECTION_SORT_SEQUENCE, new org.apache.thrift.meta_data.FieldMetaData("selectionSortSequence", org.apache.thrift.TFieldRequirementType.OPTIONAL,
-        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+    tmpMap.put(_Fields.SELECTION_SORT_SEQUENCE, new org.apache.thrift.meta_data.FieldMetaData("selectionSortSequence", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SelectionSort.class))));
-    tmpMap.put(_Fields.OFFSET, new org.apache.thrift.meta_data.FieldMetaData("offset", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.OFFSET, new org.apache.thrift.meta_data.FieldMetaData("offset", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.SIZE, new org.apache.thrift.meta_data.FieldMetaData("size", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.SIZE, new org.apache.thrift.meta_data.FieldMetaData("size", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Selection.class, metaDataMap);
@@ -609,44 +602,44 @@ public class Selection implements org.apache.thrift.TBase<Selection, Selection._
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
           case 1: // SELECTION_COLUMNS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list52 = iprot.readListBegin();
-                struct.selectionColumns = new ArrayList<String>(_list52.size);
-                String _elem53;
-                for (int _i54 = 0; _i54 < _list52.size; ++_i54)
+                org.apache.thrift.protocol.TList _list78 = iprot.readListBegin();
+                struct.selectionColumns = new ArrayList<String>(_list78.size);
+                String _elem79;
+                for (int _i80 = 0; _i80 < _list78.size; ++_i80)
                 {
-                  _elem53 = iprot.readString();
-                  struct.selectionColumns.add(_elem53);
+                  _elem79 = iprot.readString();
+                  struct.selectionColumns.add(_elem79);
                 }
                 iprot.readListEnd();
               }
               struct.setSelectionColumnsIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
           case 2: // SELECTION_SORT_SEQUENCE
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list55 = iprot.readListBegin();
-                struct.selectionSortSequence = new ArrayList<SelectionSort>(_list55.size);
-                SelectionSort _elem56;
-                for (int _i57 = 0; _i57 < _list55.size; ++_i57)
+                org.apache.thrift.protocol.TList _list81 = iprot.readListBegin();
+                struct.selectionSortSequence = new ArrayList<SelectionSort>(_list81.size);
+                SelectionSort _elem82;
+                for (int _i83 = 0; _i83 < _list81.size; ++_i83)
                 {
-                  _elem56 = new SelectionSort();
-                  _elem56.read(iprot);
-                  struct.selectionSortSequence.add(_elem56);
+                  _elem82 = new SelectionSort();
+                  _elem82.read(iprot);
+                  struct.selectionSortSequence.add(_elem82);
                 }
                 iprot.readListEnd();
               }
               struct.setSelectionSortSequenceIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -654,7 +647,7 @@ public class Selection implements org.apache.thrift.TBase<Selection, Selection._
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
               struct.offset = iprot.readI32();
               struct.setOffsetIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -662,7 +655,7 @@ public class Selection implements org.apache.thrift.TBase<Selection, Selection._
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
               struct.size = iprot.readI32();
               struct.setSizeIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -684,9 +677,9 @@ public class Selection implements org.apache.thrift.TBase<Selection, Selection._
           oprot.writeFieldBegin(SELECTION_COLUMNS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.selectionColumns.size()));
-            for (String _iter58 : struct.selectionColumns)
+            for (String _iter84 : struct.selectionColumns)
             {
-              oprot.writeString(_iter58);
+              oprot.writeString(_iter84);
             }
             oprot.writeListEnd();
           }
@@ -698,9 +691,9 @@ public class Selection implements org.apache.thrift.TBase<Selection, Selection._
           oprot.writeFieldBegin(SELECTION_SORT_SEQUENCE_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.selectionSortSequence.size()));
-            for (SelectionSort _iter59 : struct.selectionSortSequence)
+            for (SelectionSort _iter85 : struct.selectionSortSequence)
             {
-              _iter59.write(oprot);
+              _iter85.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -751,18 +744,18 @@ public class Selection implements org.apache.thrift.TBase<Selection, Selection._
       if (struct.isSetSelectionColumns()) {
         {
           oprot.writeI32(struct.selectionColumns.size());
-          for (String _iter60 : struct.selectionColumns)
+          for (String _iter86 : struct.selectionColumns)
           {
-            oprot.writeString(_iter60);
+            oprot.writeString(_iter86);
           }
         }
       }
       if (struct.isSetSelectionSortSequence()) {
         {
           oprot.writeI32(struct.selectionSortSequence.size());
-          for (SelectionSort _iter61 : struct.selectionSortSequence)
+          for (SelectionSort _iter87 : struct.selectionSortSequence)
           {
-            _iter61.write(oprot);
+            _iter87.write(oprot);
           }
         }
       }
@@ -780,27 +773,27 @@ public class Selection implements org.apache.thrift.TBase<Selection, Selection._
       BitSet incoming = iprot.readBitSet(4);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list62 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.selectionColumns = new ArrayList<String>(_list62.size);
-          String _elem63;
-          for (int _i64 = 0; _i64 < _list62.size; ++_i64)
+          org.apache.thrift.protocol.TList _list88 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.selectionColumns = new ArrayList<String>(_list88.size);
+          String _elem89;
+          for (int _i90 = 0; _i90 < _list88.size; ++_i90)
           {
-            _elem63 = iprot.readString();
-            struct.selectionColumns.add(_elem63);
+            _elem89 = iprot.readString();
+            struct.selectionColumns.add(_elem89);
           }
         }
         struct.setSelectionColumnsIsSet(true);
       }
       if (incoming.get(1)) {
         {
-          org.apache.thrift.protocol.TList _list65 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.selectionSortSequence = new ArrayList<SelectionSort>(_list65.size);
-          SelectionSort _elem66;
-          for (int _i67 = 0; _i67 < _list65.size; ++_i67)
+          org.apache.thrift.protocol.TList _list91 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.selectionSortSequence = new ArrayList<SelectionSort>(_list91.size);
+          SelectionSort _elem92;
+          for (int _i93 = 0; _i93 < _list91.size; ++_i93)
           {
-            _elem66 = new SelectionSort();
-            _elem66.read(iprot);
-            struct.selectionSortSequence.add(_elem66);
+            _elem92 = new SelectionSort();
+            _elem92.read(iprot);
+            struct.selectionSortSequence.add(_elem92);
           }
         }
         struct.setSelectionSortSequenceIsSet(true);
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/SelectionSort.java b/pinot-common/src/main/java/org/apache/pinot/common/request/SelectionSort.java
index aef39a2..413f8fb 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/SelectionSort.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/SelectionSort.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,30 +6,41 @@
  */
 package org.apache.pinot.common.request;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Generated;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.protocol.TTupleProtocol;
 import org.apache.thrift.scheme.IScheme;
 import org.apache.thrift.scheme.SchemeFactory;
 import org.apache.thrift.scheme.StandardScheme;
+
 import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 /**
  * AUTO GENERATED: DO NOT EDIT
  * selection-sort : specifies how the search results should be sorted.
  * The results can be sorted based on one or multiple columns
- *
+ * 
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-5-24")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2019-1-18")
 public class SelectionSort implements org.apache.thrift.TBase<SelectionSort, SelectionSort._Fields>, java.io.Serializable, Cloneable, Comparable<SelectionSort> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SelectionSort");
 
@@ -131,9 +124,9 @@ public class SelectionSort implements org.apache.thrift.TBase<SelectionSort, Sel
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.COLUMN, new org.apache.thrift.meta_data.FieldMetaData("column", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.IS_ASC, new org.apache.thrift.meta_data.FieldMetaData("isAsc", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.IS_ASC, new org.apache.thrift.meta_data.FieldMetaData("isAsc", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SelectionSort.class, metaDataMap);
@@ -412,7 +405,7 @@ public class SelectionSort implements org.apache.thrift.TBase<SelectionSort, Sel
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -420,7 +413,7 @@ public class SelectionSort implements org.apache.thrift.TBase<SelectionSort, Sel
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.column = iprot.readString();
               struct.setColumnIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -428,7 +421,7 @@ public class SelectionSort implements org.apache.thrift.TBase<SelectionSort, Sel
             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
               struct.isAsc = iprot.readBool();
               struct.setIsAscIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/response/ProcessingException.java b/pinot-common/src/main/java/org/apache/pinot/common/response/ProcessingException.java
index f0463ac..2035406 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/response/ProcessingException.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/response/ProcessingException.java
@@ -1,22 +1,4 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -24,29 +6,39 @@
  */
 package org.apache.pinot.common.response;
 
-import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Generated;
-import org.apache.thrift.EncodingUtils;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TTupleProtocol;
 import org.apache.thrift.scheme.IScheme;
 import org.apache.thrift.scheme.SchemeFactory;
 import org.apache.thrift.scheme.StandardScheme;
+
 import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 /**
  * Processing exception
- *
+ * 
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2017-5-24")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2019-1-18")
 public class ProcessingException extends TException implements org.apache.thrift.TBase<ProcessingException, ProcessingException._Fields>, java.io.Serializable, Cloneable, Comparable<ProcessingException> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ProcessingException");
 
@@ -130,9 +122,9 @@ public class ProcessingException extends TException implements org.apache.thrift
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.ERROR_CODE, new org.apache.thrift.meta_data.FieldMetaData("errorCode", org.apache.thrift.TFieldRequirementType.REQUIRED,
+    tmpMap.put(_Fields.ERROR_CODE, new org.apache.thrift.meta_data.FieldMetaData("errorCode", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ProcessingException.class, metaDataMap);
@@ -421,7 +413,7 @@ public class ProcessingException extends TException implements org.apache.thrift
       while (true)
       {
         schemeField = iprot.readFieldBegin();
-        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
           break;
         }
         switch (schemeField.id) {
@@ -429,7 +421,7 @@ public class ProcessingException extends TException implements org.apache.thrift
             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
               struct.errorCode = iprot.readI32();
               struct.setErrorCodeIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
@@ -437,7 +429,7 @@ public class ProcessingException extends TException implements org.apache.thrift
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.message = iprot.readString();
               struct.setMessageIsSet(true);
-            } else {
+            } else { 
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
diff --git a/pinot-common/src/main/java/org/apache/pinot/pql/parsers/Pql2AstListener.java b/pinot-common/src/main/java/org/apache/pinot/pql/parsers/Pql2AstListener.java
index 7de7a26..1b6669d 100644
--- a/pinot-common/src/main/java/org/apache/pinot/pql/parsers/Pql2AstListener.java
+++ b/pinot-common/src/main/java/org/apache/pinot/pql/parsers/Pql2AstListener.java
@@ -35,6 +35,7 @@ import org.apache.pinot.pql.parsers.pql2.ast.InPredicateAstNode;
 import org.apache.pinot.pql.parsers.pql2.ast.IntegerLiteralAstNode;
 import org.apache.pinot.pql.parsers.pql2.ast.IsPredicateAstNode;
 import org.apache.pinot.pql.parsers.pql2.ast.LimitAstNode;
+import org.apache.pinot.pql.parsers.pql2.ast.MatchesPredicateAstNode;
 import org.apache.pinot.pql.parsers.pql2.ast.OptionAstNode;
 import org.apache.pinot.pql.parsers.pql2.ast.OptionsAstNode;
 import org.apache.pinot.pql.parsers.pql2.ast.OrderByAstNode;
@@ -291,6 +292,16 @@ public class Pql2AstListener extends PQL2BaseListener {
   }
 
   @Override
+  public void enterMatchesPredicate(@NotNull PQL2Parser.MatchesPredicateContext ctx) {
+    pushNode(new MatchesPredicateAstNode());
+  }
+
+  @Override
+  public void exitMatchesPredicate(@NotNull PQL2Parser.MatchesPredicateContext ctx) {
+    popNode();
+  }
+  
+  @Override
   public void enterHaving(@NotNull PQL2Parser.HavingContext ctx) {
     pushNode(new HavingAstNode());
   }
diff --git a/pinot-common/src/main/java/org/apache/pinot/pql/parsers/pql2/ast/MatchesPredicateAstNode.java b/pinot-common/src/main/java/org/apache/pinot/pql/parsers/pql2/ast/MatchesPredicateAstNode.java
new file mode 100644
index 0000000..7715064
--- /dev/null
+++ b/pinot-common/src/main/java/org/apache/pinot/pql/parsers/pql2/ast/MatchesPredicateAstNode.java
@@ -0,0 +1,62 @@
+package org.apache.pinot.pql.parsers.pql2.ast;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.pinot.common.request.BrokerRequest;
+import org.apache.pinot.common.request.FilterOperator;
+import org.apache.pinot.common.utils.request.FilterQueryTree;
+import org.apache.pinot.common.utils.request.HavingQueryTree;
+import org.apache.pinot.pql.parsers.Pql2CompilationException;
+
+public class MatchesPredicateAstNode extends PredicateAstNode {
+
+  private static final String SEPERATOR = "\t\t";
+  private String _identifier;
+
+  @Override
+  public void addChild(AstNode childNode) {
+    if (childNode instanceof IdentifierAstNode) {
+      if (_identifier == null) {
+        IdentifierAstNode node = (IdentifierAstNode) childNode;
+        _identifier = node.getName();
+      } else {
+        throw new Pql2CompilationException("TEXT_MATCH predicate has more than one identifier.");
+      }
+    } else if (childNode instanceof FunctionCallAstNode) {
+      throw new Pql2CompilationException("TEXT_MATCH operator can not be called for a function.");
+    } else {
+      super.addChild(childNode);
+    }
+  }
+
+  @Override
+  public FilterQueryTree buildFilterQueryTree() {
+    if (_identifier == null) {
+      throw new Pql2CompilationException("TEXT_MATCH predicate has no identifier");
+    }
+
+    List<String> values = new ArrayList<>();
+
+    for (AstNode astNode : getChildren()) {
+      if (astNode instanceof LiteralAstNode) {
+        LiteralAstNode node = (LiteralAstNode) astNode;
+        String expr = node.getValueAsString();
+        values.add(expr);
+      }
+    }
+    if (values.size() != 2) {
+      throw new Pql2CompilationException(
+          "TEXT_MATCH expects columnName, 'queryString', 'queryOption'");
+    }
+
+    FilterOperator filterOperator = FilterOperator.TEXT_MATCH;
+    return new FilterQueryTree(_identifier, values, filterOperator, null);
+  }
+
+  @Override
+  public HavingQueryTree buildHavingQueryTree() {
+    throw new Pql2CompilationException("TEXT_MATCH predicate is not supported in HAVING clause.");
+  }
+
+}
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/common/Predicate.java b/pinot-core/src/main/java/org/apache/pinot/core/common/Predicate.java
index 41ddd6f..302dd0c 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/common/Predicate.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/common/Predicate.java
@@ -28,12 +28,14 @@ import org.apache.pinot.core.common.predicate.NEqPredicate;
 import org.apache.pinot.core.common.predicate.NotInPredicate;
 import org.apache.pinot.core.common.predicate.RangePredicate;
 import org.apache.pinot.core.common.predicate.RegexpLikePredicate;
+import org.apache.pinot.core.common.predicate.MatchesPredicate;
 
 
 public abstract class Predicate {
 
   public enum Type {
-    EQ, NEQ, REGEXP_LIKE, RANGE, IN, NOT_IN;
+
+    EQ, NEQ, REGEXP_LIKE, RANGE, IN, NOT_IN, MATCHES;
 
     public boolean isExclusive() {
       return this == NEQ || this == NOT_IN;
@@ -74,8 +76,9 @@ public abstract class Predicate {
     final String column = filterQueryTree.getColumn();
     final List<String> value = filterQueryTree.getValue();
 
-    Predicate predicate = null;
+    Predicate predicate;
     switch (filterType) {
+<<<<<<< HEAD
       case EQUALITY:
         predicate = new EqPredicate(column, value);
         break;
@@ -94,8 +97,36 @@ public abstract class Predicate {
       case IN:
         predicate = new InPredicate(column, value);
         break;
+      case TEXT_MATCH:
+        predicate = new MatchesPredicate(column, value);
+        break;
       default:
         throw new UnsupportedOperationException("Unsupported filterType:" + filterType);
+=======
+    case EQUALITY:
+      predicate = new EqPredicate(column, value);
+      break;
+    case RANGE:
+      predicate = new RangePredicate(column, value);
+      break;
+    case REGEXP_LIKE:
+      predicate = new RegexpLikePredicate(column, value);
+      break;
+    case NOT:
+      predicate = new NEqPredicate(column, value);
+      break;
+    case NOT_IN:
+      predicate = new NotInPredicate(column, value);
+      break;
+    case IN:
+      predicate = new InPredicate(column, value);
+      break;
+    case MATCHES:
+      predicate = new MatchesPredicate(column, value);
+      break;
+    default:
+      throw new UnsupportedOperationException("Unsupported filterType:" + filterType);
+>>>>>>> Enhancing PQL to support MATCHES predicate, can be used for searching within text, map, json and other complex objects
     }
     return predicate;
   }
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/common/predicate/MatchesPredicate.java b/pinot-core/src/main/java/org/apache/pinot/core/common/predicate/MatchesPredicate.java
new file mode 100644
index 0000000..8b0e285
--- /dev/null
+++ b/pinot-core/src/main/java/org/apache/pinot/core/common/predicate/MatchesPredicate.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.core.common.predicate;
+
+import com.google.common.base.Preconditions;
+import java.util.Arrays;
+import java.util.List;
+import org.apache.pinot.core.common.Predicate;
+
+
+public class MatchesPredicate extends Predicate {
+  String _query;
+  String _options;
+  public MatchesPredicate(String lhs, List<String> rhs) {
+    super(lhs, Type.MATCHES, rhs);
+    Preconditions.checkArgument(rhs.size() == 2);
+    _query = rhs.get(0);
+    _options = rhs.get(1);
+  }
+
+  @Override
+  public String toString() {
+    return "Predicate: type: " + getType() + ", left : " + getLhs() + ", right : " + Arrays.toString(new String[]{_query, _options}) + "\n";
+  }
+  
+  public String getQuery(){
+   return _query;
+  }
+
+  public String getQueryOptions(){
+    return _options;
+  }
+
+}
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/predicate/MatchesPredicateEvaluatorFactory.java b/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/predicate/MatchesPredicateEvaluatorFactory.java
new file mode 100644
index 0000000..26b7e16
--- /dev/null
+++ b/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/predicate/MatchesPredicateEvaluatorFactory.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.core.operator.filter.predicate;
+
+import org.apache.pinot.common.data.FieldSpec;
+import org.apache.pinot.core.common.Predicate;
+import org.apache.pinot.core.common.predicate.MatchesPredicate;
+
+public class MatchesPredicateEvaluatorFactory {
+  private MatchesPredicateEvaluatorFactory() {
+  }
+
+  /**
+   * Create a new instance of raw value based REGEXP_LIKE predicate evaluator.
+   * @param textMatchPredicate REGEXP_LIKE predicate to evaluate
+   * @param dataType Data type for the column
+   * @return Raw value based REGEXP_LIKE predicate evaluator
+   */
+  public static BaseRawValueBasedPredicateEvaluator newRawValueBasedEvaluator(
+      MatchesPredicate textMatchPredicate, FieldSpec.DataType dataType) {
+    return new RawValueBasedTextMatchPredicateEvaluator(textMatchPredicate);
+  }
+
+  public static final class RawValueBasedTextMatchPredicateEvaluator
+      extends BaseRawValueBasedPredicateEvaluator {
+    String _query;
+    String _options;
+
+    public RawValueBasedTextMatchPredicateEvaluator(MatchesPredicate textMatchPredicate) {
+      _query = textMatchPredicate.getQuery();
+      _options = textMatchPredicate.getQueryOptions();
+    }
+
+    @Override
+    public Predicate.Type getPredicateType() {
+      return Predicate.Type.MATCHES;
+    }
+
+    @Override
+    public boolean applySV(String value) {
+      throw new UnsupportedOperationException(
+          "Text Match is not supported via scanning, its supported only via inverted index");
+    }
+
+    public String getQueryString() {
+      return _query;
+    }
+
+    public String getQueryOptions() {
+      return _options;
+    }
+  }
+}
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/predicate/PredicateEvaluatorProvider.java b/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/predicate/PredicateEvaluatorProvider.java
index 3acb0dd..80626ca 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/predicate/PredicateEvaluatorProvider.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/operator/filter/predicate/PredicateEvaluatorProvider.java
@@ -27,6 +27,7 @@ import org.apache.pinot.core.common.predicate.NEqPredicate;
 import org.apache.pinot.core.common.predicate.NotInPredicate;
 import org.apache.pinot.core.common.predicate.RangePredicate;
 import org.apache.pinot.core.common.predicate.RegexpLikePredicate;
+import org.apache.pinot.core.common.predicate.MatchesPredicate;
 import org.apache.pinot.core.query.exception.BadQueryRequestException;
 import org.apache.pinot.core.segment.index.readers.Dictionary;
 
@@ -53,6 +54,7 @@ public class PredicateEvaluatorProvider {
           case REGEXP_LIKE:
             return RegexpLikePredicateEvaluatorFactory
                 .newDictionaryBasedEvaluator((RegexpLikePredicate) predicate, dictionary);
+          case MATCHES:
           default:
             throw new UnsupportedOperationException("Unsupported predicate type: " + predicate.getType());
         }
@@ -72,7 +74,9 @@ public class PredicateEvaluatorProvider {
           case REGEXP_LIKE:
             return RegexpLikePredicateEvaluatorFactory
                 .newRawValueBasedEvaluator((RegexpLikePredicate) predicate, dataType);
-          default:
+          case MATCHES:
+            return MatchesPredicateEvaluatorFactory.newRawValueBasedEvaluator((MatchesPredicate) predicate, dataType);
+        default:
             throw new UnsupportedOperationException("Unsupported predicate type: " + predicate.getType());
         }
       }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@pinot.apache.org
For additional commands, e-mail: commits-help@pinot.apache.org


[incubator-pinot] 02/06: Enhancing PQL to support MATCHES predicate, can be used for searching within text, map, json and other complex objects

Posted by xi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xiangfu pushed a commit to branch nested-object-indexing-1
in repository https://gitbox.apache.org/repos/asf/incubator-pinot.git

commit 91257aa8f4a16f46631e6526d56dd08b6ad7f631
Author: kishore gopalakrishna <g....@gmail.com>
AuthorDate: Sat Jan 19 12:41:01 2019 -0800

    Enhancing PQL to support MATCHES predicate, can be used for searching within text, map, json and other complex objects
---
 .../antlr4/org/apache/pinot/pql/parsers/PQL2.g4    |  2 +-
 .../pinot/common/request/AggregationInfo.java      | 18 ++++++++++++
 .../apache/pinot/common/request/BrokerRequest.java | 18 ++++++++++++
 .../pinot/common/request/FilterOperator.java       | 22 +++++++++++++--
 .../apache/pinot/common/request/FilterQuery.java   | 18 ++++++++++++
 .../pinot/common/request/FilterQueryMap.java       | 18 ++++++++++++
 .../org/apache/pinot/common/request/GroupBy.java   | 18 ++++++++++++
 .../pinot/common/request/HavingFilterQuery.java    | 18 ++++++++++++
 .../pinot/common/request/HavingFilterQueryMap.java | 18 ++++++++++++
 .../pinot/common/request/InstanceRequest.java      | 18 ++++++++++++
 .../apache/pinot/common/request/QuerySource.java   | 18 ++++++++++++
 .../org/apache/pinot/common/request/QueryType.java | 18 ++++++++++++
 .../org/apache/pinot/common/request/Selection.java | 18 ++++++++++++
 .../apache/pinot/common/request/SelectionSort.java | 18 ++++++++++++
 .../pinot/common/response/ProcessingException.java | 18 ++++++++++++
 .../parsers/pql2/ast/MatchesPredicateAstNode.java  | 33 ++++++++++++++++------
 .../apache/pinot/pql/parsers/Pql2CompilerTest.java | 16 +++++++++++
 pinot-common/src/thrift/request.thrift             |  3 +-
 18 files changed, 298 insertions(+), 12 deletions(-)

diff --git a/pinot-common/src/main/antlr4/org/apache/pinot/pql/parsers/PQL2.g4 b/pinot-common/src/main/antlr4/org/apache/pinot/pql/parsers/PQL2.g4
index 17652ca..58c3576 100644
--- a/pinot-common/src/main/antlr4/org/apache/pinot/pql/parsers/PQL2.g4
+++ b/pinot-common/src/main/antlr4/org/apache/pinot/pql/parsers/PQL2.g4
@@ -97,7 +97,7 @@ regexpLikeClause:
   REGEXP_LIKE '(' expression ',' literal ')';
 
 matchesClause:
-  expression MATCHES '(' literal ',' literal ')';
+  expression MATCHES '(' literal (',' literal)? ')';
 
 booleanOperator: OR | AND;
 
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/AggregationInfo.java b/pinot-common/src/main/java/org/apache/pinot/common/request/AggregationInfo.java
index ab129ff..439d319 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/AggregationInfo.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/AggregationInfo.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/BrokerRequest.java b/pinot-common/src/main/java/org/apache/pinot/common/request/BrokerRequest.java
index 3ff9ad1..51a01d8 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/BrokerRequest.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/BrokerRequest.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/FilterOperator.java b/pinot-common/src/main/java/org/apache/pinot/common/request/FilterOperator.java
index a2f4351..e48d848 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/FilterOperator.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/FilterOperator.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -25,7 +43,7 @@ public enum FilterOperator implements org.apache.thrift.TEnum {
   REGEXP_LIKE(5),
   NOT_IN(6),
   IN(7),
-  TEXT_MATCH(8);
+  MATCHES(8);
 
   private final int value;
 
@@ -63,7 +81,7 @@ public enum FilterOperator implements org.apache.thrift.TEnum {
       case 7:
         return IN;
       case 8:
-        return TEXT_MATCH;
+        return MATCHES;
       default:
         return null;
     }
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQuery.java b/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQuery.java
index 3e6b8fe..a6d8fca 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQuery.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQuery.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQueryMap.java b/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQueryMap.java
index 2a15e98..02a4ddc 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQueryMap.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/FilterQueryMap.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/GroupBy.java b/pinot-common/src/main/java/org/apache/pinot/common/request/GroupBy.java
index 6381642..b9707ed 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/GroupBy.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/GroupBy.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQuery.java b/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQuery.java
index d50e055..85e8ceb 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQuery.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQuery.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQueryMap.java b/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQueryMap.java
index 0d3620a..9505d9a 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQueryMap.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/HavingFilterQueryMap.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/InstanceRequest.java b/pinot-common/src/main/java/org/apache/pinot/common/request/InstanceRequest.java
index 1bd3d19..766318c 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/InstanceRequest.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/InstanceRequest.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/QuerySource.java b/pinot-common/src/main/java/org/apache/pinot/common/request/QuerySource.java
index 9c63591..903880e 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/QuerySource.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/QuerySource.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/QueryType.java b/pinot-common/src/main/java/org/apache/pinot/common/request/QueryType.java
index a49112d..0955d71 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/QueryType.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/QueryType.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/Selection.java b/pinot-common/src/main/java/org/apache/pinot/common/request/Selection.java
index b037a0b..4c495a9 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/Selection.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/Selection.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/request/SelectionSort.java b/pinot-common/src/main/java/org/apache/pinot/common/request/SelectionSort.java
index 413f8fb..af6b16a 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/request/SelectionSort.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/request/SelectionSort.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/response/ProcessingException.java b/pinot-common/src/main/java/org/apache/pinot/common/response/ProcessingException.java
index 2035406..e5935b8 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/response/ProcessingException.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/response/ProcessingException.java
@@ -1,4 +1,22 @@
 /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/**
  * Autogenerated by Thrift Compiler (0.9.2)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
diff --git a/pinot-common/src/main/java/org/apache/pinot/pql/parsers/pql2/ast/MatchesPredicateAstNode.java b/pinot-common/src/main/java/org/apache/pinot/pql/parsers/pql2/ast/MatchesPredicateAstNode.java
index 7715064..097ca84 100644
--- a/pinot-common/src/main/java/org/apache/pinot/pql/parsers/pql2/ast/MatchesPredicateAstNode.java
+++ b/pinot-common/src/main/java/org/apache/pinot/pql/parsers/pql2/ast/MatchesPredicateAstNode.java
@@ -1,3 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
 package org.apache.pinot.pql.parsers.pql2.ast;
 
 import java.util.ArrayList;
@@ -11,7 +29,6 @@ import org.apache.pinot.pql.parsers.Pql2CompilationException;
 
 public class MatchesPredicateAstNode extends PredicateAstNode {
 
-  private static final String SEPERATOR = "\t\t";
   private String _identifier;
 
   @Override
@@ -21,10 +38,10 @@ public class MatchesPredicateAstNode extends PredicateAstNode {
         IdentifierAstNode node = (IdentifierAstNode) childNode;
         _identifier = node.getName();
       } else {
-        throw new Pql2CompilationException("TEXT_MATCH predicate has more than one identifier.");
+        throw new Pql2CompilationException("MATCHES predicate has more than one identifier.");
       }
     } else if (childNode instanceof FunctionCallAstNode) {
-      throw new Pql2CompilationException("TEXT_MATCH operator can not be called for a function.");
+      throw new Pql2CompilationException("MATCHES operator can not be called for a function.");
     } else {
       super.addChild(childNode);
     }
@@ -33,7 +50,7 @@ public class MatchesPredicateAstNode extends PredicateAstNode {
   @Override
   public FilterQueryTree buildFilterQueryTree() {
     if (_identifier == null) {
-      throw new Pql2CompilationException("TEXT_MATCH predicate has no identifier");
+      throw new Pql2CompilationException("MATCHES predicate has no identifier");
     }
 
     List<String> values = new ArrayList<>();
@@ -45,18 +62,18 @@ public class MatchesPredicateAstNode extends PredicateAstNode {
         values.add(expr);
       }
     }
-    if (values.size() != 2) {
+    if (values.size() != 1 && values.size() != 2) {
       throw new Pql2CompilationException(
-          "TEXT_MATCH expects columnName, 'queryString', 'queryOption'");
+          "MATCHES expects columnName, 'queryString', 'queryOptions' (optional)");
     }
 
-    FilterOperator filterOperator = FilterOperator.TEXT_MATCH;
+    FilterOperator filterOperator = FilterOperator.MATCHES;
     return new FilterQueryTree(_identifier, values, filterOperator, null);
   }
 
   @Override
   public HavingQueryTree buildHavingQueryTree() {
-    throw new Pql2CompilationException("TEXT_MATCH predicate is not supported in HAVING clause.");
+    throw new Pql2CompilationException("MATCHES predicate is not supported in HAVING clause.");
   }
 
 }
diff --git a/pinot-common/src/test/java/org/apache/pinot/pql/parsers/Pql2CompilerTest.java b/pinot-common/src/test/java/org/apache/pinot/pql/parsers/Pql2CompilerTest.java
index 8d57811..231f15f 100644
--- a/pinot-common/src/test/java/org/apache/pinot/pql/parsers/Pql2CompilerTest.java
+++ b/pinot-common/src/test/java/org/apache/pinot/pql/parsers/Pql2CompilerTest.java
@@ -238,4 +238,20 @@ public class Pql2CompilerTest {
     Assert.assertEquals(expressions.size(), 1);
     Assert.assertEquals(expressions.get(0), "sub('foo',bar)");
   }
+  
+  @Test
+  public void testMatchPredicate() {
+    BrokerRequest brokerRequest = COMPILER.compileToBrokerRequest("select a from T where col matches ('name=adam')");
+    Assert.assertEquals(brokerRequest.getFilterQuery().getColumn(), "col");
+    Assert.assertEquals(brokerRequest.getFilterQuery().getValueSize(), 1);
+    Assert.assertEquals(brokerRequest.getFilterQuery().getValue().get(0), "name=adam");
+    
+     brokerRequest = COMPILER.compileToBrokerRequest("select a from T where col matches ('name=adam', '{syntax:lucene}')");
+    Assert.assertEquals(brokerRequest.getFilterQuery().getColumn(), "col");
+    Assert.assertEquals(brokerRequest.getFilterQuery().getValueSize(), 2);
+    Assert.assertEquals(brokerRequest.getFilterQuery().getValue().get(0), "name=adam");
+    Assert.assertEquals(brokerRequest.getFilterQuery().getValue().get(0), "name=adam");
+
+
+  }
 }
diff --git a/pinot-common/src/thrift/request.thrift b/pinot-common/src/thrift/request.thrift
index 8108991..595cb70 100644
--- a/pinot-common/src/thrift/request.thrift
+++ b/pinot-common/src/thrift/request.thrift
@@ -30,7 +30,8 @@ enum FilterOperator {
   RANGE,
   REGEXP_LIKE,
   NOT_IN,
-  IN
+  IN,
+  MATCHES
 }
 
 /**


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@pinot.apache.org
For additional commands, e-mail: commits-help@pinot.apache.org


[incubator-pinot] 03/06: Adding support for Object Type

Posted by xi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xiangfu pushed a commit to branch nested-object-indexing-1
in repository https://gitbox.apache.org/repos/asf/incubator-pinot.git

commit 74f8e6eede16c3a8eba97d449feeeb70d486b76b
Author: kishore gopalakrishna <g....@gmail.com>
AuthorDate: Sun Jan 20 17:49:05 2019 -0800

    Adding support for Object Type
---
 .../org/apache/pinot/common/data/FieldSpec.java    | 23 +++++-
 .../org/apache/pinot/common/data/PinotObject.java  | 58 +++++++++++++++
 .../pinot/common/data/objects/JSONObject.java      | 83 ++++++++++++++++++++++
 .../pinot/common/data/objects/MapObject.java       | 66 +++++++++++++++++
 .../pinot/common/data/objects/TextObject.java      | 53 ++++++++++++++
 .../creator/impl/SegmentColumnarIndexCreator.java  |  1 +
 .../core/segment/creator/impl/V1Constants.java     |  1 +
 .../pinot/core/segment/index/ColumnMetadata.java   | 21 ++++--
 8 files changed, 301 insertions(+), 5 deletions(-)

diff --git a/pinot-common/src/main/java/org/apache/pinot/common/data/FieldSpec.java b/pinot-common/src/main/java/org/apache/pinot/common/data/FieldSpec.java
index 30be748..080f0e7 100644
--- a/pinot-common/src/main/java/org/apache/pinot/common/data/FieldSpec.java
+++ b/pinot-common/src/main/java/org/apache/pinot/common/data/FieldSpec.java
@@ -86,6 +86,10 @@ public abstract class FieldSpec implements Comparable<FieldSpec>, ConfigNodeLife
 
   @ConfigKey("virtualColumnProvider")
   protected String _virtualColumnProvider;
+  
+  //Complex type that can be constructed from raw bytes stored e.g. map, json, text
+  @ConfigKey("objectType")
+  protected String _objectType;
 
   // Default constructor required by JSON de-serializer. DO NOT REMOVE.
   public FieldSpec() {
@@ -98,15 +102,21 @@ public abstract class FieldSpec implements Comparable<FieldSpec>, ConfigNodeLife
   public FieldSpec(String name, DataType dataType, boolean isSingleValueField, @Nullable Object defaultNullValue) {
     this(name, dataType, isSingleValueField, DEFAULT_MAX_LENGTH, defaultNullValue);
   }
-
+  
   public FieldSpec(String name, DataType dataType, boolean isSingleValueField, int maxLength,
       @Nullable Object defaultNullValue) {
+    this(name, dataType, isSingleValueField, maxLength, defaultNullValue, null);
+  }
+  
+  public FieldSpec(String name, DataType dataType, boolean isSingleValueField, int maxLength,
+      @Nullable Object defaultNullValue, @Nullable String objectType) {
     _name = name;
     _dataType = dataType.getStoredType();
     _isSingleValueField = isSingleValueField;
     _maxLength = maxLength;
     setDefaultNullValue(defaultNullValue);
   }
+  
 
   public abstract FieldType getFieldType();
 
@@ -183,6 +193,16 @@ public abstract class FieldSpec implements Comparable<FieldSpec>, ConfigNodeLife
       _defaultNullValue = getDefaultNullValue(getFieldType(), _dataType, _stringDefaultNullValue);
     }
   }
+  
+  
+  
+  public String getObjectType() {
+    return _objectType;
+  }
+
+  public void setObjectType(String objectType) {
+    _objectType = objectType;
+  }
 
   private static Object getDefaultNullValue(FieldType fieldType, DataType dataType,
       @Nullable String stringDefaultNullValue) {
@@ -353,6 +373,7 @@ public abstract class FieldSpec implements Comparable<FieldSpec>, ConfigNodeLife
     result = EqualityUtils.hashCodeOf(result, _isSingleValueField);
     result = EqualityUtils.hashCodeOf(result, getStringValue(_defaultNullValue));
     result = EqualityUtils.hashCodeOf(result, _maxLength);
+    result = EqualityUtils.hashCodeOf(result, _objectType);
     return result;
   }
 
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/data/PinotObject.java b/pinot-common/src/main/java/org/apache/pinot/common/data/PinotObject.java
new file mode 100644
index 0000000..3f1ca33
--- /dev/null
+++ b/pinot-common/src/main/java/org/apache/pinot/common/data/PinotObject.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.common.data;
+
+import java.util.List;
+
+/**
+ * Common interface for complex Object types such as HyperLogLog, Map, JSON etc.
+ * Flow to convert byte[] to PinotObject
+ * - compute the objectTypeClass from objectType (from schema/fieldSpec.objectType)
+ * - Instantiate PinotObject instance
+ * - call init(bytes)
+ * - expects all other methods to be implemented.
+ */
+public interface PinotObject {
+
+  /**
+   * Initializes the PinotObject from byte[]. Note that this method can be repeatedly called on the
+   * same instance of PinotObject.
+   * @param bytes
+   */
+  void init(byte[] bytes);
+
+  /**
+   * @return serialized byte form
+   */
+  byte[] toBytes();
+
+  /**
+   * @return list of properties in this object. Note, this can return nested properties using dot
+   *         notation
+   *         
+   */
+  List<String> getPropertyNames();
+
+  /**
+   * @param fieldName
+   * @return the value of the property, it can be a single object or a list of objects.
+   */
+  Object getProperty(String propertyName);
+
+}
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/data/objects/JSONObject.java b/pinot-common/src/main/java/org/apache/pinot/common/data/objects/JSONObject.java
new file mode 100644
index 0000000..ae7f82d
--- /dev/null
+++ b/pinot-common/src/main/java/org/apache/pinot/common/data/objects/JSONObject.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.common.data.objects;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.pinot.common.data.PinotObject;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.JsonNodeFactory;
+import com.google.common.collect.Lists;
+
+public class JSONObject implements PinotObject {
+  private static ObjectMapper _MAPPER = new ObjectMapper();
+  private JsonNode _jsonNode;
+
+  @Override
+  public void init(byte[] bytes) {
+    try {
+      _jsonNode = _MAPPER.readTree(bytes);
+    } catch (IOException e) {
+      _jsonNode = JsonNodeFactory.instance.objectNode();
+    }
+  }
+
+  @Override
+  public byte[] toBytes() {
+    try {
+      return _MAPPER.writeValueAsBytes(_jsonNode);
+    } catch (JsonProcessingException e) {
+      return "{}".getBytes();
+    }
+  }
+
+  @Override
+  public List<String> getPropertyNames() {
+    List<String> fields = Lists.newArrayList();
+    // TODO: Add support to iterate recursively
+    Iterator<String> iterator = _jsonNode.fieldNames();
+    while (iterator.hasNext()) {
+      String fieldName = (String) iterator.next();
+      fields.add(fieldName);
+    }
+    return fields;
+  }
+
+  @Override
+  public Object getProperty(String fieldName) {
+    JsonNode jsonNode = _jsonNode.get(fieldName);
+    if (jsonNode.isArray()) {
+      Iterator<JsonNode> iterator = jsonNode.iterator();
+      List<String> list = new ArrayList<String>();
+      while (iterator.hasNext()) {
+        list.add(iterator.next().asText());
+      }
+      return list;
+    } else {
+      return jsonNode.asText();
+    }
+  }
+
+}
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/data/objects/MapObject.java b/pinot-common/src/main/java/org/apache/pinot/common/data/objects/MapObject.java
new file mode 100644
index 0000000..606137d
--- /dev/null
+++ b/pinot-common/src/main/java/org/apache/pinot/common/data/objects/MapObject.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.common.data.objects;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.pinot.common.data.PinotObject;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.collect.Lists;
+
+public class MapObject implements PinotObject {
+
+  private static final ObjectMapper _MAPPER = new ObjectMapper();
+  private Map<String, Object> _stringMap;
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public void init(byte[] bytes) {
+    try {
+      _stringMap = _MAPPER.readValue(bytes, Map.class);
+    } catch (IOException e) {
+      _stringMap = Collections.emptyMap(); // fall back to an empty map if the bytes are not a valid JSON map
+    }
+  }
+
+  @Override
+  public byte[] toBytes() {
+    try {
+      return _MAPPER.writeValueAsBytes(_stringMap);
+    } catch (JsonProcessingException e) {
+      return "{}".getBytes();
+    }
+  }
+
+  @Override
+  public List<String> getPropertyNames() {
+    return Lists.newArrayList(_stringMap.keySet());
+  }
+
+  @Override
+  public Object getProperty(String field) {
+    return _stringMap.get(field);
+  }
+
+}
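
MapObject offers the same contract for flat string-keyed maps; only the deserialization differs. A sketch with an assumed JSON map payload:

    MapObject map = new MapObject();
    map.init("{\"city\":\"oakland\",\"zip\":\"94607\"}".getBytes(StandardCharsets.UTF_8));
    map.getPropertyNames();  // [city, zip]
    map.getProperty("city"); // "oakland"
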
diff --git a/pinot-common/src/main/java/org/apache/pinot/common/data/objects/TextObject.java b/pinot-common/src/main/java/org/apache/pinot/common/data/objects/TextObject.java
new file mode 100644
index 0000000..cf5f27e
--- /dev/null
+++ b/pinot-common/src/main/java/org/apache/pinot/common/data/objects/TextObject.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.common.data.objects;
+
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+
+import org.apache.pinot.common.data.PinotObject;
+
+import com.google.common.collect.Lists;
+
+public class TextObject implements PinotObject {
+
+  private byte[] _bytes;
+  private static final List<String> _FIELDS = Lists.newArrayList("Content");
+
+  @Override
+  public void init(byte[] bytes) {
+    _bytes = bytes;
+  }
+
+  @Override
+  public byte[] toBytes() {
+    return _bytes;
+  }
+
+  @Override
+  public List<String> getPropertyNames() {
+    return _FIELDS;
+  }
+
+  @Override
+  public Object getProperty(String field) {
+    // "Content" is the only property; the raw bytes are assumed to be UTF-8 encoded text
+    if (_FIELDS.get(0).equalsIgnoreCase(field)) {
+      return new String(_bytes, StandardCharsets.UTF_8);
+    }
+    return null;
+  }
+
+}
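
TextObject deliberately models the whole payload as a single "Content" property, so a text index built over it has exactly one field. A sketch, assuming the payload is UTF-8 text as noted in the code above:

    TextObject text = new TextObject();
    text.init("pinot now supports text search".getBytes(StandardCharsets.UTF_8));
    text.getPropertyNames();     // [Content]
    text.getProperty("Content"); // the full decoded string
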
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/SegmentColumnarIndexCreator.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/SegmentColumnarIndexCreator.java
index 14ba041..6e7642d 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/SegmentColumnarIndexCreator.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/SegmentColumnarIndexCreator.java
@@ -404,6 +404,7 @@ public class SegmentColumnarIndexCreator implements SegmentCreator {
         String.valueOf(PinotDataBitSet.getNumBitsPerValue(cardinality - 1)));
     properties.setProperty(getKeyFor(column, DICTIONARY_ELEMENT_SIZE), String.valueOf(dictionaryElementSize));
     properties.setProperty(getKeyFor(column, COLUMN_TYPE), String.valueOf(fieldSpec.getFieldType()));
+    properties.setProperty(getKeyFor(column, OBJECT_TYPE), fieldSpec.getObjectType());
     properties.setProperty(getKeyFor(column, IS_SORTED), String.valueOf(columnIndexCreationInfo.isSorted()));
     properties.setProperty(getKeyFor(column, HAS_NULL_VALUE), String.valueOf(columnIndexCreationInfo.hasNulls()));
     properties.setProperty(getKeyFor(column, HAS_DICTIONARY), String.valueOf(hasDictionary));
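
One caveat on the new OBJECT_TYPE line above: getObjectType() can plausibly return null for columns without a complex object type, and how Commons Configuration treats a null value passed to setProperty() varies. A defensive variant (a sketch, not part of this commit):

    String objectType = fieldSpec.getObjectType();
    if (objectType != null) {
      properties.setProperty(getKeyFor(column, OBJECT_TYPE), objectType);
    }
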
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/V1Constants.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/V1Constants.java
index 4d6dbdb..3536018 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/V1Constants.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/creator/impl/V1Constants.java
@@ -119,6 +119,7 @@ public class V1Constants {
       public static final String BITS_PER_ELEMENT = "bitsPerElement";
       public static final String DICTIONARY_ELEMENT_SIZE = "lengthOfEachEntry";
       public static final String COLUMN_TYPE = "columnType";
+      public static final String OBJECT_TYPE = "objectType";
       public static final String IS_SORTED = "isSorted";
       public static final String HAS_NULL_VALUE = "hasNullValue";
       public static final String HAS_DICTIONARY = "hasDictionary";
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/ColumnMetadata.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/ColumnMetadata.java
index 1a37ffa..0ddca57 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/index/ColumnMetadata.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/index/ColumnMetadata.java
@@ -58,6 +58,7 @@ public class ColumnMetadata {
   private final int bitsPerElement;
   private final int columnMaxLength;
   private final FieldType fieldType;
+  private final String objectType;
   private final boolean isSorted;
   @JsonProperty
   private final boolean containsNulls;
@@ -98,6 +99,7 @@ public class ColumnMetadata {
     builder.setBitsPerElement(config.getInt(getKeyFor(column, BITS_PER_ELEMENT)));
     builder.setColumnMaxLength(config.getInt(getKeyFor(column, DICTIONARY_ELEMENT_SIZE)));
     builder.setFieldType(FieldType.valueOf(config.getString(getKeyFor(column, COLUMN_TYPE)).toUpperCase()));
+    builder.setObjectType(config.getString(getKeyFor(column, OBJECT_TYPE), null));
     builder.setIsSorted(config.getBoolean(getKeyFor(column, IS_SORTED)));
     builder.setContainsNulls(config.getBoolean(getKeyFor(column, HAS_NULL_VALUE)));
     builder.setHasDictionary(config.getBoolean(getKeyFor(column, HAS_DICTIONARY), true));
@@ -218,6 +220,7 @@ public class ColumnMetadata {
     private int bitsPerElement;
     private int columnMaxLength;
     private FieldType fieldType;
+    private String objectType;
     private boolean isSorted;
     private boolean containsNulls;
     private boolean hasDictionary;
@@ -286,6 +289,11 @@ public class ColumnMetadata {
       return this;
     }
 
+    public Builder setObjectType(String objectType) {
+      this.objectType = objectType;
+      return this;
+    }
+
     public Builder setIsSorted(boolean isSorted) {
       this.isSorted = isSorted;
       return this;
@@ -397,16 +405,16 @@ public class ColumnMetadata {
 
     public ColumnMetadata build() {
       return new ColumnMetadata(columnName, cardinality, totalDocs, totalRawDocs, totalAggDocs, dataType,
-          bitsPerElement, columnMaxLength, fieldType, isSorted, containsNulls, hasDictionary, hasInvertedIndex,
-          isSingleValue, maxNumberOfMultiValues, totalNumberOfEntries, isAutoGenerated, isVirtual,
+          bitsPerElement, columnMaxLength, fieldType, objectType, isSorted, containsNulls, hasDictionary,
+          hasInvertedIndex, isSingleValue, maxNumberOfMultiValues, totalNumberOfEntries, isAutoGenerated, isVirtual,
           defaultNullValueString, timeUnit, paddingCharacter, derivedMetricType, fieldSize, originColumnName, minValue,
           maxValue, partitionFunction, numPartitions, _partitions, dateTimeFormat, dateTimeGranularity);
     }
   }
 
   private ColumnMetadata(String columnName, int cardinality, int totalDocs, int totalRawDocs, int totalAggDocs,
-      DataType dataType, int bitsPerElement, int columnMaxLength, FieldType fieldType, boolean isSorted,
-      boolean hasNulls, boolean hasDictionary, boolean hasInvertedIndex, boolean isSingleValue,
+      DataType dataType, int bitsPerElement, int columnMaxLength, FieldType fieldType, String objectType,
+      boolean isSorted, boolean hasNulls, boolean hasDictionary, boolean hasInvertedIndex, boolean isSingleValue,
       int maxNumberOfMultiValues, int totalNumberOfEntries, boolean isAutoGenerated, boolean isVirtual,
       String defaultNullValueString, TimeUnit timeUnit, char paddingCharacter, DerivedMetricType derivedMetricType,
       int fieldSize, String originColumnName, Comparable minValue, Comparable maxValue,
@@ -421,6 +429,7 @@ public class ColumnMetadata {
     this.bitsPerElement = bitsPerElement;
     this.columnMaxLength = columnMaxLength;
     this.fieldType = fieldType;
+    this.objectType = objectType;
     this.isSorted = isSorted;
     this.containsNulls = hasNulls;
     this.hasDictionary = hasDictionary;
@@ -508,6 +517,10 @@ public class ColumnMetadata {
     return fieldType;
   }
 
+  public String getObjectType() {
+    return objectType;
+  }
+
   public boolean isSorted() {
     return isSorted;
   }
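
With these changes, the per-column section of a segment's metadata.properties carries the object type next to the existing keys, and ColumnMetadata surfaces it via getObjectType(). An illustrative snippet (column name and values are made up):

    column.profile.columnType = DIMENSION
    column.profile.objectType = JSON
    column.profile.isSorted = false
    column.profile.hasDictionary = true
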


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@pinot.apache.org
For additional commands, e-mail: commits-help@pinot.apache.org