You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@drill.apache.org by am...@apache.org on 2018/10/25 23:09:03 UTC

[drill] 07/08: DRILL-6381: Address review comments (part 2): fix formatting issues and add javadoc.

This is an automated email from the ASF dual-hosted git repository.

amansinha pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git

commit 387bc4fefc40b645685439fd1da43f7223e5933c
Author: Aman Sinha <as...@maprtech.com>
AuthorDate: Wed Oct 10 16:36:48 2018 -0700

    DRILL-6381: Address review comments (part 2): fix formatting issues and add javadoc.
---
 .../planner/index/MapRDBFunctionalIndexInfo.java   |  22 ++--
 .../exec/planner/index/MapRDBIndexDescriptor.java  |   4 +-
 .../exec/store/mapr/db/MapRDBScanBatchCreator.java |   4 +-
 .../exec/store/mapr/db/MapRDBSubScanSpec.java      |   2 +-
 .../store/mapr/db/json/JsonTableGroupScan.java     |  17 ++-
 .../db/json/JsonTableRangePartitionFunction.java   |   4 +-
 .../store/mapr/db/json/OjaiFunctionsProcessor.java |   2 +-
 .../mapr/db/json/RestrictedJsonTableGroupScan.java |   8 +-
 .../drill/exec/udf/mapr/db/DecodeFieldPath.java    |   4 +-
 .../drill/exec/udf/mapr/db/MatchesPlaceholder.java |   7 +-
 .../exec/udf/mapr/db/NotMatchesPlaceholder.java    |   7 +-
 .../exec/udf/mapr/db/NotTypeOfPlaceholder.java     |   7 +-
 .../drill/exec/udf/mapr/db/SizeOfPlaceholder.java  |   7 +-
 .../drill/exec/udf/mapr/db/TypeOfPlaceholder.java  |   7 +-
 .../maprdb/tests/index/IndexHintPlanTest.java      |  19 ++-
 .../drill/maprdb/tests/index/IndexPlanTest.java    | 135 +++++++++------------
 .../drill/maprdb/tests/index/LargeTableGen.java    |  34 +++---
 .../maprdb/tests/index/LargeTableGenBase.java      |  25 ++--
 .../drill/maprdb/tests/index/StatisticsTest.java   |  21 ++--
 .../drill/maprdb/tests/index/TableIndexCmd.java    |  26 ++--
 .../drill/exec/physical/base/IndexGroupScan.java   |   1 -
 .../exec/physical/impl/join/RowKeyJoinBatch.java   |  18 +--
 .../exec/planner/index/DrillIndexDefinition.java   |  22 ++--
 .../exec/planner/index/DrillIndexDescriptor.java   |  24 +++-
 .../exec/planner/index/IndexConditionInfo.java     |  44 ++++---
 .../drill/exec/planner/index/IndexPlanUtils.java   |  74 +++++------
 .../drill/exec/planner/index/IndexSelector.java    |   2 +-
 .../drill/exec/planner/index/SimpleRexRemap.java   |  18 +--
 .../planner/index/rules/AbstractMatchFunction.java |   8 +-
 .../planner/logical/DrillMergeProjectRule.java     |   2 +-
 30 files changed, 271 insertions(+), 304 deletions(-)

diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/planner/index/MapRDBFunctionalIndexInfo.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/planner/index/MapRDBFunctionalIndexInfo.java
index ec38636..67938f3 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/planner/index/MapRDBFunctionalIndexInfo.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/planner/index/MapRDBFunctionalIndexInfo.java
@@ -32,15 +32,15 @@ public class MapRDBFunctionalIndexInfo implements FunctionalIndexInfo {
 
   private boolean hasFunctionalField = false;
 
-  //when we scan schemaPath in groupscan's columns, we check if this column(schemaPath) should be rewritten to '$N',
-  //When there are more than two functions on the same column in index, CAST(a.b as INT), CAST(a.b as VARCHAR),
+  // When we scan schemaPath in groupscan's columns, we check if this column(schemaPath) should be rewritten to '$N',
+  // When there are more than two functions on the same column in index, CAST(a.b as INT), CAST(a.b as VARCHAR),
   // then we should map SchemaPath a.b to a set of SchemaPath, e.g. $1, $2
   private Map<SchemaPath, Set<SchemaPath>> columnToConvert;
 
   // map of functional index expression to destination SchemaPath e.g. $N
   private Map<LogicalExpression, LogicalExpression> exprToConvert;
 
-  //map of SchemaPath involved in a functional field
+  // map of SchemaPath involved in a functional field
   private Map<LogicalExpression, Set<SchemaPath>> pathsInExpr;
 
   private Set<SchemaPath> newPathsForIndexedFunction;
@@ -52,7 +52,7 @@ public class MapRDBFunctionalIndexInfo implements FunctionalIndexInfo {
     columnToConvert = Maps.newHashMap();
     exprToConvert = Maps.newHashMap();
     pathsInExpr = Maps.newHashMap();
-    //keep the order of new paths, it may be related to the naming policy
+    // keep the order of new paths, it may be related to the naming policy
     newPathsForIndexedFunction = Sets.newLinkedHashSet();
     allPathsInFunction = Sets.newHashSet();
     init();
@@ -60,15 +60,15 @@ public class MapRDBFunctionalIndexInfo implements FunctionalIndexInfo {
 
   private void init() {
     int count = 0;
-    for(LogicalExpression indexedExpr : indexDesc.getIndexColumns()) {
-      if( !(indexedExpr instanceof SchemaPath) ) {
+    for (LogicalExpression indexedExpr : indexDesc.getIndexColumns()) {
+      if (!(indexedExpr instanceof SchemaPath)) {
         hasFunctionalField = true;
         SchemaPath functionalFieldPath = SchemaPath.getSimplePath("$"+count);
         newPathsForIndexedFunction.add(functionalFieldPath);
 
-        //now we handle only cast expression
-        if(indexedExpr instanceof CastExpression) {
-          //We handle only CAST directly on SchemaPath for now.
+        // now we handle only cast expression
+        if (indexedExpr instanceof CastExpression) {
+          // We handle only CAST directly on SchemaPath for now.
           SchemaPath pathBeingCasted = (SchemaPath)((CastExpression) indexedExpr).getInput();
           addTargetPathForOriginalPath(pathBeingCasted, functionalFieldPath);
           addPathInExpr(indexedExpr, pathBeingCasted);
@@ -119,7 +119,7 @@ public class MapRDBFunctionalIndexInfo implements FunctionalIndexInfo {
    * @return
    */
   public SchemaPath getNewPath(SchemaPath path) {
-    if(columnToConvert.containsKey(path)) {
+    if (columnToConvert.containsKey(path)) {
       return columnToConvert.get(path).iterator().next();
     }
     return null;
@@ -131,7 +131,7 @@ public class MapRDBFunctionalIndexInfo implements FunctionalIndexInfo {
    * @return the renamed schemapath in index table for the indexed expression
    */
   public SchemaPath getNewPathFromExpr(LogicalExpression expr) {
-    if(exprToConvert.containsKey(expr)) {
+    if (exprToConvert.containsKey(expr)) {
       return (SchemaPath)exprToConvert.get(expr);
     }
     return null;
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/planner/index/MapRDBIndexDescriptor.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/planner/index/MapRDBIndexDescriptor.java
index a57f5b5..75e6bc2 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/planner/index/MapRDBIndexDescriptor.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/planner/index/MapRDBIndexDescriptor.java
@@ -122,9 +122,9 @@ public class MapRDBIndexDescriptor extends DrillIndexDescriptor {
       List<LogicalExpression> allCols = Lists.newArrayList();
       Collection<SchemaPath> decoded;
 
-      for(LogicalExpression expr : expressions) {
+      for (LogicalExpression expr : expressions) {
         LogicalExpression nonDecoded = expr.accept(this, null);
-        if(nonDecoded != null) {
+        if (nonDecoded != null) {
           allCols.add(nonDecoded);
         }
       }
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBScanBatchCreator.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBScanBatchCreator.java
index de2817e..2f53398 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBScanBatchCreator.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBScanBatchCreator.java
@@ -33,14 +33,14 @@ import org.apache.drill.exec.store.mapr.db.json.MaprDBJsonRecordReader;
 
 import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
 
-public class MapRDBScanBatchCreator implements BatchCreator<MapRDBSubScan>{
+public class MapRDBScanBatchCreator implements BatchCreator<MapRDBSubScan> {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(MapRDBScanBatchCreator.class);
 
   @Override
   public ScanBatch getBatch(ExecutorFragmentContext context, MapRDBSubScan subScan, List<RecordBatch> children) throws ExecutionSetupException {
     Preconditions.checkArgument(children.isEmpty());
     List<RecordReader> readers = new LinkedList<>();
-    for(MapRDBSubScanSpec scanSpec : subScan.getRegionScanSpecList()){
+    for (MapRDBSubScanSpec scanSpec : subScan.getRegionScanSpecList()) {
       try {
         if (BinaryTableGroupScan.TABLE_BINARY.equals(subScan.getTableType())) {
           readers.add(new HBaseRecordReader(
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBSubScanSpec.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBSubScanSpec.java
index e24438e..7fc2e83 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBSubScanSpec.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBSubScanSpec.java
@@ -23,7 +23,7 @@ import com.mapr.db.index.IndexDesc;
 import com.mapr.fs.jni.MapRConstants;
 import com.mapr.org.apache.hadoop.hbase.util.Bytes;
 
-public class MapRDBSubScanSpec implements Comparable<MapRDBSubScanSpec>{
+public class MapRDBSubScanSpec implements Comparable<MapRDBSubScanSpec> {
 
   protected String tableName;
   protected IndexDesc indexDesc;
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/JsonTableGroupScan.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/JsonTableGroupScan.java
index b545262..647fe82 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/JsonTableGroupScan.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/JsonTableGroupScan.java
@@ -320,7 +320,7 @@ public class JsonTableGroupScan extends MapRDBGroupScan implements IndexGroupSca
     double totalRowCount = stats.getRowCount(null, null);
     logger.debug("GroupScan {} with stats {}: rowCount={}, condition={}, totalRowCount={}, fullTableRowCount={}",
             System.identityHashCode(this), System.identityHashCode(stats), rowCount,
-            scanSpec.getCondition()==null?"null":scanSpec.getCondition(),
+            scanSpec.getCondition() == null ? "null" : scanSpec.getCondition(),
             totalRowCount, fullTableRowCount);
     // If UNKNOWN, or DB stats sync issues(manifests as 0 rows) use defaults.
     if (rowCount == ROWCOUNT_UNKNOWN || rowCount == 0) {
@@ -377,7 +377,7 @@ public class JsonTableGroupScan extends MapRDBGroupScan implements IndexGroupSca
     PluginCost pluginCostModel = formatPlugin.getPluginCostModel();
     final int avgColumnSize = pluginCostModel.getAverageColumnSize(this);
     boolean filterPushed = (scanSpec.getSerializedFilter() != null);
-    if(scanSpec != null && scanSpec.getIndexDesc() != null) {
+    if (scanSpec != null && scanSpec.getIndexDesc() != null) {
       totalColNum = scanSpec.getIndexDesc().getIncludedFields().size()
           + scanSpec.getIndexDesc().getIndexedFields().size() + 1;
     }
@@ -446,8 +446,8 @@ public class JsonTableGroupScan extends MapRDBGroupScan implements IndexGroupSca
   @Override
   public String toString() {
     return "JsonTableGroupScan [ScanSpec=" + scanSpec + ", columns=" + columns
-        + (maxRecordsToRead>0? ", limit=" + maxRecordsToRead : "")
-        + (getMaxParallelizationWidth()>0? ", maxwidth=" + getMaxParallelizationWidth() : "") + "]";
+        + (maxRecordsToRead > 0 ? ", limit=" + maxRecordsToRead : "")
+        + (getMaxParallelizationWidth() > 0 ? ", maxwidth=" + getMaxParallelizationWidth() : "") + "]";
   }
 
   public JsonScanSpec getScanSpec() {
@@ -498,7 +498,7 @@ public class JsonTableGroupScan extends MapRDBGroupScan implements IndexGroupSca
       indexDesc = (IndexDesc)((MapRDBIndexDescriptor)index).getOriginalDesc();
     }
     // If no index is specified, get it from the primary table
-    if(indexDesc == null && scanSpec.isSecondaryIndex()) {
+    if (indexDesc == null && scanSpec.isSecondaryIndex()) {
       throw new UnsupportedOperationException("getAverageRowSizeStats should be invoked on primary table");
     }
 
@@ -540,12 +540,11 @@ public class JsonTableGroupScan extends MapRDBGroupScan implements IndexGroupSca
    * @return {@link MapRDBStatisticsPayload} statistics
    */
   private MapRDBStatisticsPayload getFirstKeyEstimatedStatsInternal(QueryCondition condition, IndexDesc index, RelNode scanRel) {
-    // double totalRows = getRowCount(null, scanPrel);
 
     // If no index is specified, get it from the primary table
-    if(index == null && scanSpec.isSecondaryIndex()) {
+    if (index == null && scanSpec.isSecondaryIndex()) {
       // If stats not cached get it from the table.
-      //table = MapRDB.getTable(scanSpec.getPrimaryTablePath());
+      // table = MapRDB.getTable(scanSpec.getPrimaryTablePath());
       throw new UnsupportedOperationException("getFirstKeyEstimatedStats should be invoked on primary table");
     }
 
@@ -740,7 +739,7 @@ public class JsonTableGroupScan extends MapRDBGroupScan implements IndexGroupSca
     if (maxRecordsToRead < 0) {
       return true;
     }
-    return false;//limit is already pushed. No more pushdown of limit
+    return false; // limit is already pushed. No more pushdown of limit
   }
 
   @Override
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/JsonTableRangePartitionFunction.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/JsonTableRangePartitionFunction.java
index 436347f..c0b73ee 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/JsonTableRangePartitionFunction.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/JsonTableRangePartitionFunction.java
@@ -117,7 +117,7 @@ public class JsonTableRangePartitionFunction extends AbstractRangePartitionFunct
       if (thisPartRefList.size() != otherPartRefList.size()) {
         return false;
       }
-      for (int refIdx=0; refIdx<thisPartRefList.size(); refIdx++) {
+      for (int refIdx = 0; refIdx < thisPartRefList.size(); refIdx++) {
         if (!thisPartRefList.get(refIdx).equals(otherPartRefList.get(refIdx))) {
           return false;
         }
@@ -148,7 +148,7 @@ public class JsonTableRangePartitionFunction extends AbstractRangePartitionFunct
 
       // Check if key is present in the mid interval of [start, stop].
       // Account for empty byte array start/stop
-      if ( (Bytes.compareTo(encodedKey, start) >= 0 ||
+      if ((Bytes.compareTo(encodedKey, start) >= 0 ||
              Bytes.equals(start, MapRConstants.EMPTY_BYTE_ARRAY)
            ) &&
            (Bytes.compareTo(encodedKey, stop) < 0 ||
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/OjaiFunctionsProcessor.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/OjaiFunctionsProcessor.java
index 959e243..7e29c22 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/OjaiFunctionsProcessor.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/OjaiFunctionsProcessor.java
@@ -47,7 +47,7 @@ class OjaiFunctionsProcessor extends AbstractExprVisitor<Void, Void, RuntimeExce
     final Throwable throwable = new Throwable();
     final StackTraceElement[] ste = throwable.getStackTrace();
     final StringBuilder sb = new StringBuilder();
-    for(int i = 1; i < ste.length; ++i) {
+    for (int i = 1; i < ste.length; ++i) {
       sb.append(ste[i].toString());
       sb.append('\n');
     }
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/RestrictedJsonTableGroupScan.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/RestrictedJsonTableGroupScan.java
index 48ad96d..2f06d00 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/RestrictedJsonTableGroupScan.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/RestrictedJsonTableGroupScan.java
@@ -90,7 +90,7 @@ public class RestrictedJsonTableGroupScan extends JsonTableGroupScan {
   private List<RestrictedMapRDBSubScanSpec> getEndPointFragmentMapping(int minorFragmentId) {
     List<RestrictedMapRDBSubScanSpec> restrictedSubScanSpecList = Lists.newArrayList();
     List<MapRDBSubScanSpec> subScanSpecList = endpointFragmentMapping.get(minorFragmentId);
-    for(MapRDBSubScanSpec s : subScanSpecList) {
+    for (MapRDBSubScanSpec s : subScanSpecList) {
       restrictedSubScanSpecList.add((RestrictedMapRDBSubScanSpec) s);
     }
     return restrictedSubScanSpecList;
@@ -128,7 +128,7 @@ public class RestrictedJsonTableGroupScan extends JsonTableGroupScan {
 
   @Override
   public ScanStats getScanStats() {
-    //TODO: ideally here we should use the rowcount from index scan, and multiply a factor of restricted scan
+    // TODO: ideally here we should use the rowcount from index scan, and multiply a factor of restricted scan
     double rowCount;
     PluginCost pluginCostModel = formatPlugin.getPluginCostModel();
     final int avgColumnSize = pluginCostModel.getAverageColumnSize(this);
@@ -178,7 +178,7 @@ public class RestrictedJsonTableGroupScan extends JsonTableGroupScan {
   public String toString() {
     return "RestrictedJsonTableGroupScan [ScanSpec=" + scanSpec + ", columns=" + columns
         + ", rowcount=" + computeRestrictedScanRowcount()
-        + (maxRecordsToRead>0? ", limit=" + maxRecordsToRead : "")
-        + (getMaxParallelizationWidth()>0? ", maxwidth=" + getMaxParallelizationWidth() : "") + "]";
+        + (maxRecordsToRead > 0 ? ", limit=" + maxRecordsToRead : "")
+        + (getMaxParallelizationWidth() > 0 ? ", maxwidth=" + getMaxParallelizationWidth() : "") + "]";
   }
 }
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/DecodeFieldPath.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/DecodeFieldPath.java
index 6748c4f..3241be4 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/DecodeFieldPath.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/DecodeFieldPath.java
@@ -46,9 +46,9 @@ public class DecodeFieldPath implements DrillSimpleFunc {
         toStringFromUTF8(input.start, input.end, input.buffer).split(",");
     String[] decodedPaths = org.apache.drill.exec.util.EncodedSchemaPathSet.decode(encodedPaths);
     java.util.Arrays.sort(decodedPaths);
-    
+
     StringBuilder sb = new StringBuilder();
-    for(String decodedPath : decodedPaths) {
+    for (String decodedPath : decodedPaths) {
       sb.append(", ").append(org.ojai.FieldPath.parseFrom(decodedPath).asPathString());
     }
     String outputString = "[" + sb.substring(2) + "]";
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/MatchesPlaceholder.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/MatchesPlaceholder.java
index 6aad44e..38f61c3 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/MatchesPlaceholder.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/MatchesPlaceholder.java
@@ -34,10 +34,9 @@ import org.apache.drill.exec.expr.holders.VarCharHolder;
  * which will replace this function with the real OJAI equivalent to be pushed down.
  * Therefore, there's no implementation here.
  */
-@FunctionTemplate(
-                name="ojai_matches",
-                scope=FunctionTemplate.FunctionScope.SIMPLE,
-                nulls=FunctionTemplate.NullHandling.INTERNAL)
+@FunctionTemplate(name = "ojai_matches",
+                scope = FunctionTemplate.FunctionScope.SIMPLE,
+                nulls = FunctionTemplate.NullHandling.INTERNAL)
 public class MatchesPlaceholder implements DrillSimpleFunc {
 
         @Param BigIntHolder /*FieldReader*/ field;
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/NotMatchesPlaceholder.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/NotMatchesPlaceholder.java
index 56baebb..248579f 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/NotMatchesPlaceholder.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/NotMatchesPlaceholder.java
@@ -34,10 +34,9 @@ import org.apache.drill.exec.expr.holders.VarCharHolder;
  * which will replace this function with the real OJAI equivalent to be pushed down.
  * Therefore, there's no implementation here.
  */
-@FunctionTemplate(
-                name="ojai_notmatches",
-                scope=FunctionTemplate.FunctionScope.SIMPLE,
-                nulls=FunctionTemplate.NullHandling.INTERNAL)
+@FunctionTemplate(name = "ojai_notmatches",
+                scope = FunctionTemplate.FunctionScope.SIMPLE,
+                nulls = FunctionTemplate.NullHandling.INTERNAL)
 public class NotMatchesPlaceholder implements DrillSimpleFunc {
 
         @Param BigIntHolder /*FieldReader*/ field;
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/NotTypeOfPlaceholder.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/NotTypeOfPlaceholder.java
index 6c01a48..78abcc0 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/NotTypeOfPlaceholder.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/NotTypeOfPlaceholder.java
@@ -34,10 +34,9 @@ import org.apache.drill.exec.expr.holders.IntHolder;
  * which will replace this function with the real OJAI equivalent to be pushed down.
  * Therefore, there's no implementation here.
  */
-@FunctionTemplate(
-                name="ojai_nottypeof",
-                scope=FunctionTemplate.FunctionScope.SIMPLE,
-                nulls=FunctionTemplate.NullHandling.INTERNAL)
+@FunctionTemplate(name = "ojai_nottypeof",
+                scope = FunctionTemplate.FunctionScope.SIMPLE,
+                nulls = FunctionTemplate.NullHandling.INTERNAL)
 public class NotTypeOfPlaceholder implements DrillSimpleFunc {
 
         @Param BigIntHolder /*FieldReader*/ field;
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/SizeOfPlaceholder.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/SizeOfPlaceholder.java
index 7d7150c..8fcfee8 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/SizeOfPlaceholder.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/SizeOfPlaceholder.java
@@ -34,10 +34,9 @@ import org.apache.drill.exec.expr.holders.VarCharHolder;
  * which will replace this function with the real OJAI equivalent to be pushed down.
  * Therefore, there's no implementation here.
  */
-@FunctionTemplate(
-                name="ojai_sizeof",
-                scope=FunctionTemplate.FunctionScope.SIMPLE,
-                nulls=FunctionTemplate.NullHandling.INTERNAL)
+@FunctionTemplate(name = "ojai_sizeof",
+                scope = FunctionTemplate.FunctionScope.SIMPLE,
+                nulls = FunctionTemplate.NullHandling.INTERNAL)
 public class SizeOfPlaceholder implements DrillSimpleFunc {
 
         @Param BigIntHolder /*FieldReader*/ field;
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/TypeOfPlaceholder.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/TypeOfPlaceholder.java
index 1d1efc0..585404f 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/TypeOfPlaceholder.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/udf/mapr/db/TypeOfPlaceholder.java
@@ -34,10 +34,9 @@ import org.apache.drill.exec.expr.holders.IntHolder;
  * which will replace this function with the real OJAI equivalent to be pushed down.
  * Therefore, there's no implementation here.
  */
-@FunctionTemplate(
-                name="ojai_typeof",
-                scope=FunctionTemplate.FunctionScope.SIMPLE,
-                nulls=FunctionTemplate.NullHandling.INTERNAL)
+@FunctionTemplate(name = "ojai_typeof",
+                scope = FunctionTemplate.FunctionScope.SIMPLE,
+                nulls = FunctionTemplate.NullHandling.INTERNAL)
 public class TypeOfPlaceholder implements DrillSimpleFunc {
 
         @Param BigIntHolder /*FieldReader*/ field;
diff --git a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/IndexHintPlanTest.java b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/IndexHintPlanTest.java
index 9ac27b4..c6ec0fe 100644
--- a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/IndexHintPlanTest.java
+++ b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/IndexHintPlanTest.java
@@ -46,7 +46,7 @@ public class IndexHintPlanTest extends IndexPlanTest {
                 new String[]{"RowKeyJoin"}
         );
 
-        //default plan picked by optimizer.
+        // default plan picked by optimizer.
         PlanTestBase.testPlanMatchingPatterns(query,
                 new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*indexName=i_ssn"},
                 new String[]{"RowKeyJoin"}
@@ -75,13 +75,12 @@ public class IndexHintPlanTest extends IndexPlanTest {
                 new String[]{"RowKeyJoin"}
         );
 
-        //default plan picked by optimizer
+        // default plan picked by optimizer
         PlanTestBase.testPlanMatchingPatterns(query,
                 new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*indexName=(i_state_city|i_state_age_phone)"},
                 new String[]{"RowKeyJoin"}
         );
 
-        return;
     }
 
     @Test
@@ -101,21 +100,20 @@ public class IndexHintPlanTest extends IndexPlanTest {
                 new String[]{"RowKeyJoin"}
         );
 
-        //default plan picked by query optimizer.
+        // default plan picked by query optimizer.
         PlanTestBase.testPlanMatchingPatterns(query,
                 new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*indexName=(i_state_city|i_state_age_phone)"},
                 new String[]{"RowKeyJoin"}
         );
 
-        return;
     }
 
-    //Negative cases
+    // Negative cases
 
     @Test
     // A testcase where there are multiple index to pick from but none of them equals to the index provided as hint (index hint is wrong).
-    //In this index is not at all present in the table hence it falls back to the case where the index itself is not given.
-    //Hence here one of the i_state_city or i_state_age_lic will be selected depending upon the cost.
+    // In this index is not at all present in the table hence it falls back to the case where the index itself is not given.
+    // Hence here one of the i_state_city or i_state_age_lic will be selected depending upon the cost.
     public void testWithMultipleIndexesButNoIndexWithHint() throws Exception {
 
         String hintquery = "SELECT t.`address`.`state` AS `state` FROM table(hbase.`index_test_primary`(type => 'maprdb', index => 'i_state_and_city')) as t " +
@@ -126,7 +124,6 @@ public class IndexHintPlanTest extends IndexPlanTest {
                 new String[]{"RowKeyJoin"}
         );
 
-        return;
     }
 
     @Test
@@ -150,12 +147,11 @@ public class IndexHintPlanTest extends IndexPlanTest {
                 new String[]{"RowKeyJoin"}
         );
 
-        return;
     }
 
 
     @Test
-    //Covering index should be generated for a simple query instead of a RowKeyJoin.
+    // Covering index should be generated for a simple query instead of a RowKeyJoin.
     public void testSimpleNoRowKeyJoin() throws Exception {
         String query = "SELECT `reverseid` from table(hbase.`index_test_primary`(type => 'maprdb', index => 'hash_i_reverseid'))  " +
                 "where `reverseid` = 1234";
@@ -166,6 +162,5 @@ public class IndexHintPlanTest extends IndexPlanTest {
                 new String[]{"RowKeyJoin"}
         );
 
-        return;
    }
 }
diff --git a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/IndexPlanTest.java b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/IndexPlanTest.java
index c0ea2a0..a9de9e3 100644
--- a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/IndexPlanTest.java
+++ b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/IndexPlanTest.java
@@ -146,14 +146,11 @@ public class IndexPlanTest extends BaseJsonTest {
         new String[]{"RowKeyJoin"}
     );
 
-    System.out.println("Covering Plan Verified!");
-
     testBuilder()
         .sqlQuery(query)
         .ordered()
         .baselineColumns("phone").baselineValues("6500005471")
         .go();
-    return;
 
   }
 
@@ -167,8 +164,6 @@ public class IndexPlanTest extends BaseJsonTest {
         new String[]{"RowKeyJoin"}
     );
 
-    System.out.println("Covering Plan Verified!");
-
     testBuilder()
         .optionSettingQueriesForTestQuery(defaultHavingIndexPlan)
         .sqlQuery(query)
@@ -176,7 +171,6 @@ public class IndexPlanTest extends BaseJsonTest {
         .baselineColumns("ssn").baselineValues("100007423")
         .go();
 
-    return;
   }
 
   @Test
@@ -190,8 +184,6 @@ public class IndexPlanTest extends BaseJsonTest {
         new String[]{"RowKeyJoin", "indexName="}
     );
 
-    System.out.println("No Index Plan Verified!");
-
     testBuilder()
         .sqlQuery(query)
         .unOrdered()
@@ -199,7 +191,6 @@ public class IndexPlanTest extends BaseJsonTest {
         .baselineColumns("ssn").baselineValues("100007632")
         .go();
 
-    return;
   }
 
   @Test
@@ -214,15 +205,12 @@ public class IndexPlanTest extends BaseJsonTest {
         new String[]{}
     );
 
-    System.out.println("Non-Covering Plan Verified!");
-
     testBuilder()
         .sqlQuery(query)
         .ordered()
         .baselineColumns("fname").baselineValues("KfFzK")
         .go();
 
-    return;
   }
 
   @Test
@@ -276,7 +264,6 @@ public class IndexPlanTest extends BaseJsonTest {
         .baselineColumns("tid").baselineValues("1012")
         .go();
 
-    return;
   }
 
   @Test
@@ -298,7 +285,6 @@ public class IndexPlanTest extends BaseJsonTest {
         .baselineColumns("rowid").baselineValues("1012")
         .go();
 
-    return;
   }
 
   @Test
@@ -322,7 +308,7 @@ public class IndexPlanTest extends BaseJsonTest {
     } finally {
       test(defaultRowKeyJoinBackIOFactor);
     }
-    return;
+
   }
 
   @Test
@@ -356,7 +342,7 @@ public class IndexPlanTest extends BaseJsonTest {
     } finally {
       test(defaultIntersectPlans + ";" + enableFTS);
     }
-    return;
+
   }
 
   @Test
@@ -391,7 +377,7 @@ public class IndexPlanTest extends BaseJsonTest {
     } finally {
       test(defaultRowKeyJoinBackIOFactor);
     }
-    return;
+
   }
 
   @Test//filter cover indexed, included and not in index at all filter
@@ -420,8 +406,8 @@ public class IndexPlanTest extends BaseJsonTest {
         .build()
         .run();
 
-    return;
   }
+
   @Test
   public void CompositeIndexCoveringPlan() throws Exception {
 
@@ -448,7 +434,7 @@ public class IndexPlanTest extends BaseJsonTest {
         .sqlBaselineQuery(query)
         .build()
         .run();
-    return;
+
   }
 
   @Test
@@ -477,12 +463,12 @@ public class IndexPlanTest extends BaseJsonTest {
       test(defaultHavingIndexPlan);
       test(sliceTargetDefault);
     }
-    return;
+
   }
 
   @Test
   public void TestCastVarCharCoveringPlan() throws Exception {
-    //length 255 is to exact match the casted indexed field's length
+    // length 255 is to exactly match the casted indexed field's length
     String query = "SELECT t._id as tid, cast(t.driverlicense as varchar(255)) as driverlicense FROM hbase.`index_test_primary` as t " +
         " where cast(t.driverlicense as varchar(255))='100007423'";
     test(defaultHavingIndexPlan);
@@ -491,8 +477,6 @@ public class IndexPlanTest extends BaseJsonTest {
         new String[]{"RowKeyJoin"}
     );
 
-    System.out.println("TestCastCoveringPlan Plan Verified!");
-
     testBuilder()
         .optionSettingQueriesForTestQuery(defaultHavingIndexPlan)
         .sqlQuery(query)
@@ -500,7 +484,6 @@ public class IndexPlanTest extends BaseJsonTest {
         .baselineColumns("tid", "driverlicense").baselineValues("1012", "100007423")
         .go();
 
-    return;
   }
 
   @Test
@@ -513,8 +496,6 @@ public class IndexPlanTest extends BaseJsonTest {
         new String[]{"RowKeyJoin"}
     );
 
-    System.out.println("TestCastCoveringPlan Plan Verified!");
-
     testBuilder()
         .optionSettingQueriesForTestQuery(defaultHavingIndexPlan)
         .sqlQuery(query)
@@ -522,7 +503,6 @@ public class IndexPlanTest extends BaseJsonTest {
         .baselineColumns("tid", "ssn", "phone").baselineValues("1012", 100007423, "6500005471")
         .go();
 
-    return;
   }
 
   @Test
@@ -535,14 +515,12 @@ public class IndexPlanTest extends BaseJsonTest {
         new String[]{}
     );
 
-    System.out.println("TestCastNonCoveringPlan Plan Verified!");
-
     testBuilder()
         .sqlQuery(query)
         .ordered()
         .baselineColumns("ssn").baselineValues("100007423")
         .go();
-    return;
+
   }
 
   @Test
@@ -555,15 +533,12 @@ public class IndexPlanTest extends BaseJsonTest {
         new String[]{}
     );
 
-    System.out.println("TestCastVarchar_ConvertToRangePlan Verified!");
-
     testBuilder()
         .sqlQuery(query)
         .ordered()
         .baselineColumns("ssn").baselineValues("100007423")
         .go();
 
-    return;
   }
 
   @Test // cast expression in filter is not indexed, but the same field casted to different type was indexed (CAST id.ssn as INT)
@@ -579,7 +554,7 @@ public class IndexPlanTest extends BaseJsonTest {
 
   @Test
   public void TestLongerCastVarCharNoIndex() throws Exception {
-    //length 256 is to exact match the casted indexed field's length
+    // length 256 is to exactly match the casted indexed field's length
     String query = "SELECT t._id as tid, cast(t.driverlicense as varchar(500)) as driverlicense FROM hbase.`index_test_primary` as t " +
         " where cast(t.driverlicense as varchar(500))='100007423'";
     test(defaultHavingIndexPlan);
@@ -588,9 +563,6 @@ public class IndexPlanTest extends BaseJsonTest {
         new String[]{"RowKeyJoin", "indexName="}
     );
 
-    System.out.println("TestLongerCastVarCharNoIndex Plan Verified!");
-
-    return;
   }
 
   @Test
@@ -666,7 +638,7 @@ public class IndexPlanTest extends BaseJsonTest {
         new String[]{"Sort"}
     );
 
-    //simple field, driverlicense
+    // simple field, driverlicense
     testBuilder()
         .sqlQuery(query)
         .ordered()
@@ -675,7 +647,7 @@ public class IndexPlanTest extends BaseJsonTest {
         .baselineColumns("phone").baselineValues("6500001595")
         .go();
 
-    //query on field of item expression(having capProject), non-simple field t.id.ssn
+    // query on field of item expression(having capProject), non-simple field t.id.ssn
     testBuilder()
         .sqlQuery(query2)
         .ordered()
@@ -696,7 +668,7 @@ public class IndexPlanTest extends BaseJsonTest {
     }
   }
 
-  //test cases are from TestNonCoveringPlanSortRemoved. Sort was removed when force_sort_noncovering was default(false)
+  // test cases are from TestNonCoveringPlanSortRemoved. Sort was removed when force_sort_noncovering was default(false)
   @Test
   public void TestNonCoveringPlanWithNoRemoveSortOption() throws Exception {
     try {
@@ -717,7 +689,7 @@ public class IndexPlanTest extends BaseJsonTest {
           new String[]{}
       );
 
-      //simple field, driverlicense
+      // simple field, driverlicense
       testBuilder()
           .sqlQuery(query)
           .ordered()
@@ -726,7 +698,7 @@ public class IndexPlanTest extends BaseJsonTest {
           .baselineColumns("phone").baselineValues("6500001595")
           .go();
 
-      //query on field of item expression(having capProject), non-simple field t.id.ssn
+      // query on field of item expression(having capProject), non-simple field t.id.ssn
       testBuilder()
           .sqlQuery(query2)
           .ordered()
@@ -795,7 +767,7 @@ public class IndexPlanTest extends BaseJsonTest {
         " where t.address.state = 'wo' and t.personal.age = 35 and t.contact.phone < '6500003000' order by t.contact.phone";
     test(defaultHavingIndexPlan);
 
-    //we should glue to index i_state_age_phone to make sure we are testing the targeted prefix construction code path
+    // we should stick to index i_state_age_phone to make sure we are testing the targeted prefix construction code path
     PlanTestBase.testPlanMatchingPatterns(query,
         new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*indexName=i_state_age_phone"},
         new String[]{"Sort"}
@@ -833,7 +805,7 @@ public class IndexPlanTest extends BaseJsonTest {
       .run();
   }
 
-  @Test  //ORDER BY last two columns not in the indexed order; Sort SHOULD NOT be dropped
+  @Test  // ORDER BY last two columns not in the indexed order; Sort SHOULD NOT be dropped
   public void TestCoveringPlanSortPrefix_3() throws Exception {
     String query = "SELECT CAST(t.personal.age as VARCHAR) as age, t.contact.phone FROM hbase.`index_test_primary` as t " +
         " where t.address.state = 'wo' and t.personal.age < 35 and t.contact.phone < '6500003000' order by t.contact.phone, t.personal.age";
@@ -976,7 +948,7 @@ public class IndexPlanTest extends BaseJsonTest {
         .go();
   }
 
-  @Test //non-covering plan. order by cast indexed field, sort SHOULD be removed
+  @Test // non-covering plan. order by cast indexed field, sort SHOULD be removed
   public void orderByCastNonCoveringPlan() throws Exception {
     String query = "SELECT t.name.lname as lname FROM hbase.`index_test_primary` as t " +
         " where CAST(t.id.ssn as INT) < 100000003 order by CAST(t.id.ssn as INT)";
@@ -996,8 +968,8 @@ public class IndexPlanTest extends BaseJsonTest {
   }
 
 
-  @Ignore //in statsCache, condition state+city has rowcount 1250, but state only has 1000. so it is picking i_state_age_phone
-  @Test //non-covering, order by non leading field, and leading fields are not in equality condition, Sort SHOULD NOT be removed
+  @Ignore // in statsCache, condition state+city has rowcount 1250, but state only has 1000. so it is picking i_state_age_phone
+  @Test // non-covering, order by non leading field, and leading fields are not in equality condition, Sort SHOULD NOT be removed
   public void NonCoveringPlan_SortPrefix_1() throws Exception {
 
     String query = "SELECT t.`id`.`ssn` AS `ssn` FROM hbase.`index_test_primary` as t " +
@@ -1008,10 +980,10 @@ public class IndexPlanTest extends BaseJsonTest {
             "RowKeyJoin(.*[\n\r])+.*RestrictedJsonTableGroupScan(.*[\n\r])+.*JsonTableGroupScan.*indexName=i_state_city"},
         new String[]{}
     );
-    return;
+
   }
 
-  @Test //non-covering, order by non leading field, and leading fields are in equality condition, Sort SHOULD be removed
+  @Test // non-covering, order by non leading field, and leading fields are in equality condition, Sort SHOULD be removed
   public void NonCoveringPlan_SortPrefix_2() throws Exception {
 
     String query = "SELECT t.`id`.`ssn` AS `ssn` FROM hbase.`index_test_primary` as t " +
@@ -1022,11 +994,11 @@ public class IndexPlanTest extends BaseJsonTest {
             "RowKeyJoin(.*[\n\r])+.*RestrictedJsonTableGroupScan(.*[\n\r])+.*JsonTableGroupScan.*indexName=i_state_city"},
         new String[]{"Sort"}
     );
-    return;
+
   }
 
   @Ignore ("Should be modified to get an index plan; not very useful since most covering plan filters get pushed")
-  @Test //Correct projection and results when filter on non-indexed column in covering plan.
+  @Test // Correct projection and results when filter on non-indexed column in covering plan.
   public void nonIndexedColumnFilterCoveringPlan() throws Exception {
     String query = "SELECT t.name.fname as fname FROM hbase.`index_test_primary` as t " +
         " where t.personal.age > 68 and t.name.fname IN ('CnGobfR', 'THOHP')";
@@ -1046,7 +1018,7 @@ public class IndexPlanTest extends BaseJsonTest {
   }
 
   @Test
-  @Ignore ("Fix after MEP 5.0")
+  @Ignore
   public void orderByLimitNonCoveringPlan() throws Exception {
     String query = "SELECT t.name.lname as lname FROM hbase.`index_test_primary` as t " +
         " where t.id.ssn < '100000003' order by t.id.ssn limit 2";
@@ -1260,7 +1232,7 @@ public class IndexPlanTest extends BaseJsonTest {
     String query = "SELECT cast(t.activity.irs.firstlogin as timestamp) AS `firstlogin`, t.id.ssn as ssn FROM hbase.`index_test_primary` as t " +
         "order by cast(t.activity.irs.firstlogin as timestamp), t.id.ssn limit 2";
     test(defaultHavingIndexPlan);
-    //no collation for hash index so Sort or TopN must have been preserved
+    // no collation for hash index so Sort or TopN must have been preserved
     PlanTestBase.testPlanMatchingPatterns(query,
         new String[] {"(Sort|TopN)"},
         new String[]{"indexName="}
@@ -1295,7 +1267,7 @@ public class IndexPlanTest extends BaseJsonTest {
         .run();
   }
 
-  @Test //negative case for no filter plan
+  @Test // negative case for no filter plan
   public void testNoFilterOrderByNoIndexMatch() throws Exception {
     String query = "SELECT t.`id`.`ssn` AS `ssn`, t.contact.phone as phone FROM hbase.`index_test_primary` as t " +
         "order by t.name.fname limit 2";
@@ -1306,27 +1278,30 @@ public class IndexPlanTest extends BaseJsonTest {
     );
   }
 
-// Enable this testcase once MD-2848 is fixed.
-//  @Test
-//  public void IntersectPlanWithOneSideNoRows() throws Exception {
-//    try {
-//      String query = "SELECT t.`name`.`lname` AS `lname` FROM hbase.`index_test_primary` as t " +
-//              " where t.personal.age = 53 AND t.personal.income=111145";
-//      test(defaultHavingIndexPlan);
-//      test(preferIntersectPlans + ";" + disableFTS);
-//      PlanTestBase.testPlanMatchingPatterns(query,
-//              new String[]{"RowKeyJoin(.*[\n\r])+.*RestrictedJsonTableGroupScan(.*[\n\r])+.*HashJoin(.*[\n\r])+.*JsonTableGroupScan.*indexName=(i_age|i_income)(.*[\n\r])+.*JsonTableGroupScan.*indexName=(i_age|i_income)"},
-//              new String[]{}
-//      );
-//
-//      testNoResult(query);
-//
-//    } finally {
-//      test(defaultIntersectPlans + ";" + enableFTS);
-//    }
-//  }
-
-  //"i_cast_age_state_phone", "$CAST(personal.age@STRING),address.state,contact.phone", "name.fname",
+// This test case encounters the following error:
+//  "Error: SYSTEM ERROR: IllegalStateException: Batch data read operation (iterator()) attempted when last
+//                next() call on batch [#16, ScanBatch] returned NONE (not OK or OK_NEW_SCHEMA)."
+// TODO: fix the root cause of the above error then enable the test
+  @Test
+  @Ignore
+  public void IntersectPlanWithOneSideNoRows() throws Exception {
+    try {
+      String query = "SELECT t.`name`.`lname` AS `lname` FROM hbase.`index_test_primary` as t " +
+              " where t.personal.age = 53 AND t.personal.income=111145";
+      test(defaultHavingIndexPlan);
+      test(preferIntersectPlans + ";" + disableFTS);
+      PlanTestBase.testPlanMatchingPatterns(query,
+              new String[]{"RowKeyJoin(.*[\n\r])+.*RestrictedJsonTableGroupScan(.*[\n\r])+.*HashJoin(.*[\n\r])+.*JsonTableGroupScan.*indexName=(i_age|i_income)(.*[\n\r])+.*JsonTableGroupScan.*indexName=(i_age|i_income)"},
+              new String[]{}
+      );
+
+      testNoResult(query);
+
+    } finally {
+      test(defaultIntersectPlans + ";" + enableFTS);
+    }
+  }
+
   @Test
   public void testTrailingFieldIndexCovering() throws Exception {
     String query = "SELECT t.`name`.`fname` AS `fname` FROM hbase.`index_test_primary` as t " +
@@ -1372,7 +1347,7 @@ public class IndexPlanTest extends BaseJsonTest {
     try {
       test(defaultHavingIndexPlan);
       test(disableHashAgg);
-      //no collation for hash index so Sort or TopN must have been preserved
+      // no collation for hash index so Sort or TopN must have been preserved
       PlanTestBase.testPlanMatchingPatterns(query,
               new String[]{"indexName=i_lic", "StreamAgg"},
               new String[]{"(Sort|TopN)"}
@@ -1396,7 +1371,7 @@ public class IndexPlanTest extends BaseJsonTest {
     String query = " select t1.driverlicense from hbase.`index_test_primary` t1" +
             " order by t1.driverlicense desc limit 2";
     test(defaultHavingIndexPlan);
-    //no collation for hash index so Sort or TopN must have been preserved
+    // no collation for hash index so Sort or TopN must have been preserved
     PlanTestBase.testPlanMatchingPatterns(query,
             new String[] {"(Sort|TopN)"},
             new String[]{"indexName="}
@@ -1418,7 +1393,7 @@ public class IndexPlanTest extends BaseJsonTest {
     try {
       test(defaultHavingIndexPlan);
       test(disableHashAgg);
-      //no collation for hash index so Sort or TopN must have been preserved
+      // no collation for hash index so Sort or TopN must have been preserved
       PlanTestBase.testPlanMatchingPatterns(query,
               new String[]{"indexName=i_lic", "StreamAgg"},
               new String[]{"(Sort|TopN)"}
@@ -1492,7 +1467,7 @@ public class IndexPlanTest extends BaseJsonTest {
     try {
       test(defaultHavingIndexPlan);
       test(disableHashAgg);
-      //no collation for hash index so Sort or TopN must have been preserved
+      // no collation for hash index so Sort or TopN must have been preserved
       PlanTestBase.testPlanMatchingPatterns(query,
               new String[]{"(Sort|TopN)", "StreamAgg"},
               new String[]{"indexName="}
@@ -1539,7 +1514,7 @@ public class IndexPlanTest extends BaseJsonTest {
     }
   }
 
-  @Test //negative case for no filter plan
+  @Test // negative case for no filter plan
   public void testNoFilterGroupByNoIndexMatch() throws Exception {
     String query = "SELECT max(t.`id`.`ssn`) AS `ssn`, max(t.contact.phone) as phone FROM hbase.`index_test_primary` as t " +
             "group by t.name.fname limit 2";
diff --git a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/LargeTableGen.java b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/LargeTableGen.java
index bc857d1..347f7f2 100644
--- a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/LargeTableGen.java
+++ b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/LargeTableGen.java
@@ -67,7 +67,7 @@ public class LargeTableGen extends LargeTableGenBase {
   Table createOrGetTable(String tableName, int recordNum) {
     if (admin.tableExists(tableName)) {
       return MapRDBImpl.getTable(tableName);
-      //admin.deleteTable(tableName);
+      // admin.deleteTable(tableName);
     }
     else {
       TableDescriptor desc = new TableDescriptorImpl(new Path(tableName));
@@ -76,7 +76,7 @@ public class LargeTableGen extends LargeTableGenBase {
 
       String[] splitsStr = new String[splits];
       StringBuilder strBuilder = new StringBuilder("Splits:");
-      for(int i=0; i<splits; ++i) {
+      for (int i = 0; i < splits; ++i) {
         splitsStr[i] = String.format("%d", (i+1)*SPLIT_SIZE);
         strBuilder.append(splitsStr[i] + ", ");
       }
@@ -87,20 +87,20 @@ public class LargeTableGen extends LargeTableGenBase {
   }
 
   private void createIndex(Table table, String[] indexDef) throws Exception {
-    if(indexDef == null) {
-      //don't create index here. indexes may have been created
+    if (indexDef == null) {
+      // don't create index here. indexes may have been created
       return;
     }
-    for(int i=0; i<indexDef.length / 3; ++i) {
+    for (int i = 0; i < indexDef.length / 3; ++i) {
       String indexCmd = String.format("maprcli table index add"
           + " -path " + table.getPath()
           + " -index %s"
           + " -indexedfields '%s'"
           + ((indexDef[3 * i + 2].length()==0)?"":" -includedfields '%s'")
           + ((indexDef[3 * i].startsWith("hash"))? " -hashed true" : ""),
-          indexDefInCommand(indexDef[3 * i]), //index name
-          indexDefInCommand(indexDef[3 * i + 1]), //indexedfields
-          indexDefInCommand(indexDef[3 * i + 2])); //includedfields
+          indexDefInCommand(indexDef[3 * i]), // index name
+          indexDefInCommand(indexDef[3 * i + 1]), // indexedfields
+          indexDefInCommand(indexDef[3 * i + 2])); // includedfields
       System.out.println(indexCmd);
 
       TestCluster.runCommand(indexCmd);
@@ -111,8 +111,8 @@ public class LargeTableGen extends LargeTableGenBase {
   private String indexDefInCommand(String def) {
     String[] splitted = def.split(",");
     StringBuffer ret = new StringBuffer();
-    for(String field: splitted) {
-      if(ret.length() == 0) {
+    for (String field: splitted) {
+      if (ret.length() == 0) {
         ret.append(field);
       }
       else {
@@ -129,14 +129,14 @@ public class LargeTableGen extends LargeTableGenBase {
     DBTests.setTableStatsSendInterval(1);
 
     if (admin.tableExists(tablePath)) {
-      //admin.deleteTable(tablePath);
+      // admin.deleteTable(tablePath);
     }
 
-    //create Json String
+    // create Json String
     int batch, i;
     int BATCH_SIZE=2000;
     try (Table table = createOrGetTable(tablePath, recordNumber)) {
-      //create index
+      // create index
       createIndex(table, indexDef);
       for (batch = 0; batch < recordNumber; batch += BATCH_SIZE) {
         int batchStop = Math.min(recordNumber, batch + BATCH_SIZE);
@@ -156,13 +156,9 @@ public class LargeTableGen extends LargeTableGenBase {
         }
         try (InputStream in = new StringBufferInputStream(strBuf.toString());
              DocumentStream stream = Json.newDocumentStream(in)) {
-          //write by individual document
-          //for (Document document : stream) {
-          //  table.insert(document, "rowid");
-          //}
           try {
-            table.insert(stream, "rowid"); //insert a batch  of document in stream
-          }catch(Exception e) {
+            table.insert(stream, "rowid"); // insert a batch of documents in the stream
+          } catch(Exception e) {
             System.out.println(stream.toString());
             throw e;
           }
diff --git a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/LargeTableGenBase.java b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/LargeTableGenBase.java
index 917f42a..bbba08f 100644
--- a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/LargeTableGenBase.java
+++ b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/LargeTableGenBase.java
@@ -46,45 +46,44 @@ public class LargeTableGenBase {
       cities = new ArrayList<>();
       List<String> states = new ArrayList<>();
 
-      int fnNum = 2000; //2k
-      int lnNum = 200000;//200k
-      int cityNum = 10000;//10k
+      int fnNum = 2000;     // 2k
+      int lnNum = 200000;   // 200k
+      int cityNum = 10000;  // 10k
       int stateNum = 50;
       Random rand = new Random(2017);
       int i;
       try {
         Set<String> strSet = new LinkedHashSet<>();
-        while(strSet.size() < stateNum) {
+        while (strSet.size() < stateNum) {
           strSet.add(RandomStringUtils.random(2, 0, 0, true, false, null, rand));
         }
         states.addAll(strSet);
 
         strSet = new LinkedHashSet<>();
-        while(strSet.size() < cityNum) {
+        while (strSet.size() < cityNum) {
           int len = 3 + strSet.size() % 6;
           strSet.add(RandomStringUtils.random(len, 0, 0, true, false, null, rand));
         }
 
         Iterator<String> it = strSet.iterator();
-        for(i=0; i<cityNum; ++i) {
+        for (i = 0; i < cityNum; ++i) {
           cities.add(new String[]{"10000", states.get(i%stateNum),  it.next()});
         }
 
         strSet = new LinkedHashSet<>();
-        while(strSet.size() < fnNum) {
+        while (strSet.size() < fnNum) {
           int len = 3 + strSet.size() % 6;
           strSet.add(RandomStringUtils.random(len, 0, 0, true, false, null, rand));
         }
         firstnames.addAll(strSet);
 
         strSet = new LinkedHashSet<>();
-        while(strSet.size() < lnNum) {
+        while (strSet.size() < lnNum) {
           int len = 3 + strSet.size() % 6;
           strSet.add(RandomStringUtils.random(len, 0, 0, true, false, null, rand));
         }
         lastnames.addAll(strSet);
-      }
-      catch(Exception e) {
+      } catch(Exception e) {
         System.out.println("init data got exception");
         e.printStackTrace();
       }
@@ -109,7 +108,7 @@ public class LargeTableGenBase {
   }
 
   protected String getPhone(int i) {
-    //80% phones are unique,
+    // 80% phones are unique,
     return String.format("%d", 6500*1000*1000L + randomized[ (randomized.length - i) %((int) (randomized.length * 0.8)) ]);
   }
 
@@ -125,14 +124,14 @@ public class LargeTableGenBase {
     return String.format("%d",randomized[i%randomized.length] % 47 + 1);
   }
 
-  //date yyyy-mm-dd
+  // date yyyy-mm-dd
   protected String getBirthdate(int i) {
     int thisseed = randomized[i%randomized.length];
     return String.format("%d-%02d-%02d",
         2016 - (thisseed % 60 + 10), thisseed % 12 + 1, (thisseed * 31) % 28 + 1 );
   }
 
-  //timestamp, yyyy-mm-dd HH:mm:ss
+  // timestamp, yyyy-mm-dd HH:mm:ss
   protected String getFirstLogin(int i) {
     int thisseed = randomized[i%randomized.length];
     int nextseed = randomized[(i+1)%randomized.length];
diff --git a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/StatisticsTest.java b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/StatisticsTest.java
index 36e25ab..20f1090 100644
--- a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/StatisticsTest.java
+++ b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/StatisticsTest.java
@@ -56,8 +56,7 @@ public class StatisticsTest extends IndexPlanTest {
         + " where (t.personal.age < 30 or t.personal.age > 100)"
         + " and (t.address.state = 'mo' or t.address.state = 'ca')";
     PlanTestBase.testPlanMatchingPatterns(explain+query,
-        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"},
-        new String[] {}
+        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"}
     );
 
     // Top-level ORs - Cannot split top-level ORs so use defaults
@@ -65,8 +64,7 @@ public class StatisticsTest extends IndexPlanTest {
         + " where (t.personal.age > 30 and t.personal.age < 100)"
         + " or (t.address.state = 'mo')";
     PlanTestBase.testPlanMatchingPatterns(explain+query,
-        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"},
-        new String[] {}
+        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"}
     );
 
     // ANDed condition - Leading index column(personal.age) and non-leading column(address.city)
@@ -74,8 +72,7 @@ public class StatisticsTest extends IndexPlanTest {
         + " where (t.personal.age < 30 or t.personal.age > 100)"
         + " and `address.city` = 'sf'";
     PlanTestBase.testPlanMatchingPatterns(explain+query,
-        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"},
-        new String[] {}
+        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"}
     );
 
     // ANDed condition - Leading index columns (address.state) and (address.city)
@@ -83,8 +80,7 @@ public class StatisticsTest extends IndexPlanTest {
         + " where (`address.state` = 'mo' or `address.state` = 'ca') " // Leading index column
         + " and `address.city` = 'sf'";                                // Non leading index column
     PlanTestBase.testPlanMatchingPatterns(explain+query,
-        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"},
-        new String[] {}
+        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"}
     );
 
     // ANDed condition - Leading index columns (address.state) and non-index column (name.fname)
@@ -92,24 +88,21 @@ public class StatisticsTest extends IndexPlanTest {
         + " where (`address.state` = 'mo' or `address.state` = 'ca') " // Leading index column
         + " and `name.fname` = 'VcFahj'";                              // Non index column
     PlanTestBase.testPlanMatchingPatterns(explain+query,
-        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"},
-        new String[] {}
+        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"}
     );
 
     // Simple condition - LIKE predicate
     query = "select t._id as rowid from hbase.`index_test_primary` as t "
         + "where t.driverlicense like '100007423%'";
     PlanTestBase.testPlanMatchingPatterns(explain+query,
-        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"},
-        new String[] {}
+        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"}
     );
 
     // Simple condition - LIKE predicate with ESCAPE clause
     query = "select t._id as rowid from hbase.`index_test_primary` as t "
         + "where t.driverlicense like '100007423%' ESCAPE '/'";
     PlanTestBase.testPlanMatchingPatterns(explain+query,
-        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"},
-        new String[] {}
+        new String[] {".*JsonTableGroupScan.*tableName=.*index_test_primary.*rows=10000"}
     );
   }
 }
diff --git a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/TableIndexCmd.java b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/TableIndexCmd.java
index a501f8f..bf50195 100644
--- a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/TableIndexCmd.java
+++ b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/index/TableIndexCmd.java
@@ -71,12 +71,9 @@ public class TableIndexCmd {
   public static void pressEnterKeyToContinue()
   {
     System.out.println("Press any key to continue...");
-    try
-    {
+    try {
       System.in.read();
-    }
-    catch(Exception e)
-    {}
+    } catch(Exception e) {}
   }
 
 
@@ -90,36 +87,35 @@ public class TableIndexCmd {
     boolean waitKeyPress = true;
     long inSize = 10000;
     Map<String, String> params = parseParameter(args);
-    if(args.length >= 2) {
-      if(params.get("host") != null) {
+    if (args.length >= 2) {
+      if (params.get("host") != null) {
         inHost = params.get("host");
       }
-      if(params.get("port") != null) {
+      if (params.get("port") != null) {
         inPort = params.get("port");
       }
-      if(params.get("table") != null) {
+      if (params.get("table") != null) {
         inTable = params.get("table");
       }
-      if(params.get("size") != null) {
+      if (params.get("size") != null) {
         inSize = Long.parseLong(params.get("size"));
       }
-      if(params.get("dict") != null) {
+      if (params.get("dict") != null) {
         dictPath = params.get("dict");
       }
-      if(params.get("wait") != null) {
+      if (params.get("wait") != null) {
         String answer = params.get("wait");
         waitKeyPress = answer.startsWith("y") || answer.startsWith("t")? true : false;
       }
     }
-    if(waitKeyPress == true) {
+    if (waitKeyPress == true) {
       pressEnterKeyToContinue();
     }
     try {
       TestBigTable tbt = new TestBigTable();
       tbt.init(inHost, inPort);
       tbt.gen.generateTableWithIndex(inTable, (int)(inSize & 0xFFFFFFFFL), null);
-    }
-    catch(Exception e) {
+    } catch(Exception e) {
       System.out.println("generate big table got exception:" + e.getMessage());
       e.printStackTrace();
     }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/IndexGroupScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/IndexGroupScan.java
index 1047e82..61db0f7 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/IndexGroupScan.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/IndexGroupScan.java
@@ -72,5 +72,4 @@ public interface IndexGroupScan extends GroupScan {
 
   @JsonIgnore
   public void setParallelizationWidth(int width);
-
 }
\ No newline at end of file
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/RowKeyJoinBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/RowKeyJoinBatch.java
index 7bca6bd..3b5566b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/RowKeyJoinBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/RowKeyJoinBatch.java
@@ -105,7 +105,7 @@ public class RowKeyJoinBatch extends AbstractRecordBatch<RowKeyJoinPOP> implemen
       return;
     }
 
-    for(final VectorWrapper<?> v : left) {
+    for (final VectorWrapper<?> v : left) {
       final TransferPair pair = v.getValueVector().makeTransferPair(
           container.addOrGet(v.getField(), callBack));
       transfers.add(pair);
@@ -148,7 +148,7 @@ public class RowKeyJoinBatch extends AbstractRecordBatch<RowKeyJoinPOP> implemen
         // we got a new batch from the right input, set this flag
         // such that subsequent check by a scan would indicate availability
         // of the row keys.
-        while((rightUpstream == IterOutcome.OK || rightUpstream == IterOutcome.OK_NEW_SCHEMA) &&
+        while ((rightUpstream == IterOutcome.OK || rightUpstream == IterOutcome.OK_NEW_SCHEMA) &&
             right.getRecordCount() == 0) {
           rightUpstream = next(right);
           logger.trace("rowkeyjoin loop when recordCount == 0. rightUpstream {}", rightUpstream);
@@ -203,16 +203,16 @@ public class RowKeyJoinBatch extends AbstractRecordBatch<RowKeyJoinPOP> implemen
   }
 
   private void outputCurrentLeftBatch() {
-    //Schema change when state is FIRST shouldn't happen as buildSchema should
-    //take care of building the schema for the first batch. This check is introduced
-    //to guard against any schema change after buildSchema phase and reading
-    //the first batch of rows.
+    // Schema change when state is FIRST shouldn't happen as buildSchema should
+    // take care of building the schema for the first batch. This check is introduced
+    // to guard against any schema change after buildSchema phase and reading
+    // the first batch of rows.
     if (leftUpstream == IterOutcome.OK_NEW_SCHEMA && state == BatchState.FIRST ||
         state == BatchState.NOT_FIRST) {
       container.zeroVectors();
       transfers.clear();
 
-      for(final VectorWrapper<?> v : left) {
+      for (final VectorWrapper<?> v : left) {
         final TransferPair pair = v.getValueVector().makeTransferPair(
             container.addOrGet(v.getField(), callBack));
         transfers.add(pair);
@@ -223,7 +223,7 @@ public class RowKeyJoinBatch extends AbstractRecordBatch<RowKeyJoinPOP> implemen
       }
     }
 
-    for(TransferPair t : transfers) {
+    for (TransferPair t : transfers) {
       t.transfer();
     }
 
@@ -238,7 +238,7 @@ public class RowKeyJoinBatch extends AbstractRecordBatch<RowKeyJoinPOP> implemen
 
   @Override  // implement RowKeyJoin interface
   public Pair<ValueVector, Integer> nextRowKeyBatch() {
-    if ( hasRowKeyBatch && right.getRecordCount() > 0 ) {
+    if (hasRowKeyBatch && right.getRecordCount() > 0) {
       // since entire right row key batch will be returned to the caller, reset
       // the hasRowKeyBatch to false
       hasRowKeyBatch = false;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/DrillIndexDefinition.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/DrillIndexDefinition.java
index 03c2a44..d756ae0 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/DrillIndexDefinition.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/DrillIndexDefinition.java
@@ -129,19 +129,19 @@ public class DrillIndexDefinition implements IndexDefinition {
   }
 
   boolean castIsCompatible(CastExpression castExpr, Collection<LogicalExpression> indexFields) {
-    for(LogicalExpression indexExpr : indexFields) {
-      if(indexExpr.getClass() != castExpr.getClass()) {
+    for (LogicalExpression indexExpr : indexFields) {
+      if (indexExpr.getClass() != castExpr.getClass()) {
         continue;
       }
       CastExpression indexCastExpr = (CastExpression)indexExpr;
-      //we compare input using equals because we know we are comparing SchemaPath,
-      //if we extend to support other expression, make sure the equals of that expression
-      //is implemented properly, otherwise it will fall to identity comparison
-      if ( !castExpr.getInput().equals(indexCastExpr.getInput()) ) {
+      // we compare input using equals because we know we are comparing SchemaPath,
+      // if we extend to support other expression, make sure the equals of that expression
+      // is implemented properly, otherwise it will fall to identity comparison
+      if (!castExpr.getInput().equals(indexCastExpr.getInput())) {
           continue;
       }
 
-      if( castExpr.getMajorType().getMinorType() != indexCastExpr.getMajorType().getMinorType()) {
+      if (castExpr.getMajorType().getMinorType() != indexCastExpr.getMajorType().getMinorType()) {
         continue;
       }
       return true;
@@ -150,8 +150,8 @@ public class DrillIndexDefinition implements IndexDefinition {
   }
 
   protected boolean columnsInIndexFields(Collection<LogicalExpression> columns, Collection<LogicalExpression> indexFields) {
-    //we need to do extra check, so we could allow the case when query condition expression is not identical with indexed fields
-    //and they still could use the index either by implicit cast or the difference is allowed, e.g. width of varchar
+    // we need to do extra check, so we could allow the case when query condition expression is not identical with indexed fields
+    // and they still could use the index either by implicit cast or the difference is allowed, e.g. width of varchar
     for (LogicalExpression col : columns) {
       if (col instanceof CastExpression) {
         if (!castIsCompatible((CastExpression) col, indexFields)) {
@@ -170,8 +170,8 @@ public class DrillIndexDefinition implements IndexDefinition {
   protected boolean someColumnsInIndexFields(Collection<LogicalExpression> columns,
       Collection<LogicalExpression> indexFields) {
 
-    //we need to do extra check, so we could allow the case when query condition expression is not identical with indexed fields
-    //and they still could use the index either by implicit cast or the difference is allowed, e.g. width of varchar
+    // we need to do extra check, so we could allow the case when query condition expression is not identical with indexed fields
+    // and they still could use the index either by implicit cast or the difference is allowed, e.g. width of varchar
     for (LogicalExpression col : columns) {
       if (col instanceof CastExpression) {
         if (castIsCompatible((CastExpression) col, indexFields)) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/DrillIndexDescriptor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/DrillIndexDescriptor.java
index 4da62c2..3c2d21a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/DrillIndexDescriptor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/DrillIndexDescriptor.java
@@ -56,7 +56,7 @@ public class DrillIndexDescriptor extends AbstractIndexDescriptor {
 
   @Override
   public double getRows(RelNode scan, RexNode indexCondition) {
-    //TODO: real implementation is to use Drill's stats implementation. for now return fake value 1.0
+    // TODO: real implementation is to use Drill's stats implementation. for now return fake value 1.0
     return 1.0;
   }
 
@@ -78,27 +78,39 @@ public class DrillIndexDescriptor extends AbstractIndexDescriptor {
     return null;
   }
 
-  public void attach(String storageName, DrillTable inTable) {
-    storage = storageName;
-    setDrillTable(inTable);
-  }
-
+  /**
+   * Set the storage plugin name.
+   * @param storageName name of the storage plugin
+   */
   public void setStorageName(String storageName) {
     storage = storageName;
   }
 
+  /**
+   * Get storage plugin name for this index descriptor
+   * @return name of the storage plugin
+   */
   public String getStorageName() {
     return storage;
   }
 
+  /**
+   * Set the drill table corresponding to the index.
+   * @param table the DrillTable instance backing this index
+   */
   public void setDrillTable(DrillTable table) {
     this.table = table;
   }
 
+  /**
+   * Get the drill table corresponding to the index descriptor
+   * @return instance of DrillTable
+   */
   public DrillTable getDrillTable() {
     return this.table;
   }
 
+  @Override
   public FunctionalIndexInfo getFunctionalInfo() {
     return null;
   }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexConditionInfo.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexConditionInfo.java
index aa51f9b..dd7f9fc 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexConditionInfo.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexConditionInfo.java
@@ -62,8 +62,7 @@ public class IndexConditionInfo {
     public Builder(RexNode condition,
                    Iterable<IndexDescriptor> indexes,
                    RexBuilder builder,
-                   RelNode scan
-    ) {
+                   RelNode scan) {
       this.condition = condition;
       this.builder = builder;
       this.scan = scan;
@@ -73,8 +72,7 @@ public class IndexConditionInfo {
     public Builder(RexNode condition,
                    IndexDescriptor index,
                    RexBuilder builder,
-                   DrillScanRel scan
-    ) {
+                   DrillScanRel scan) {
       this.condition = condition;
       this.builder = builder;
       this.scan = scan;
@@ -87,7 +85,7 @@ public class IndexConditionInfo {
      */
     public IndexConditionInfo getCollectiveInfo(IndexLogicalPlanCallContext indexContext) {
       Set<LogicalExpression> paths = Sets.newLinkedHashSet();
-      for ( IndexDescriptor index : indexes ) {
+      for (IndexDescriptor index : indexes) {
         paths.addAll(index.getIndexColumns());
         //paths.addAll(index.getNonIndexColumns());
       }
@@ -98,10 +96,12 @@ public class IndexConditionInfo {
      * A utility function to check whether the given index hint is valid.
      */
     public boolean isValidIndexHint(IndexLogicalPlanCallContext indexContext) {
-      if (indexContext.indexHint.equals("")) { return false; }
+      if (indexContext.indexHint.equals("")) {
+        return false;
+      }
 
-      for ( IndexDescriptor index: indexes ) {
-        if ( indexContext.indexHint.equals(index.getIndexName())) {
+      for (IndexDescriptor index : indexes) {
+        if (indexContext.indexHint.equals(index.getIndexName())) {
           return true;
         }
       }
@@ -119,15 +119,15 @@ public class IndexConditionInfo {
       Map<IndexDescriptor, IndexConditionInfo> indexInfoMap = Maps.newLinkedHashMap();
 
       RexNode initCondition = condition;
-      for(IndexDescriptor index : indexes) {
+      for (IndexDescriptor index : indexes) {
         List<LogicalExpression> leadingColumns = new ArrayList<>();
-        if(initCondition.isAlwaysTrue()) {
+        if (initCondition.isAlwaysTrue()) {
           break;
         }
-        //TODO: Ensure we dont get NULL pointer exceptions
+        // TODO: Ensure we don't get NULL pointer exceptions
         leadingColumns.add(index.getIndexColumns().get(0));
         IndexConditionInfo info = indexConditionRelatedToFields(leadingColumns, initCondition);
-        if(info == null || info.hasIndexCol == false) {
+        if (info == null || info.hasIndexCol == false) {
           // No info found, based on remaining condition. Check if the leading columns are same as another index
           IndexConditionInfo origInfo = indexConditionRelatedToFields(leadingColumns, condition);
           if (origInfo == null || origInfo.hasIndexCol == false) {
@@ -144,17 +144,25 @@ public class IndexConditionInfo {
       return indexInfoMap;
     }
 
+    /**
+     * Given a RexNode corresponding to the condition expression tree and the index descriptor,
+     * check if one or more columns involved in the condition tree form a prefix of the columns in the
+     * index keys.
+     * @param indexDesc the index descriptor whose key columns are checked
+     * @param initCondition the condition expression tree to match against the index keys
+     * @return true if the condition columns form a prefix of the index keys, false otherwise
+     */
     public boolean isConditionPrefix(IndexDescriptor indexDesc, RexNode initCondition) {
       List<LogicalExpression> indexCols = indexDesc.getIndexColumns();
       boolean prefix = true;
       int numPrefix = 0;
       if (indexCols.size() > 0 && initCondition != null) {
-        int i=0;
+        int i = 0;
         while (prefix && i < indexCols.size()) {
           LogicalExpression p = indexCols.get(i++);
           List<LogicalExpression> prefixCol = ImmutableList.of(p);
           IndexConditionInfo info = indexConditionRelatedToFields(prefixCol, initCondition);
-          if(info != null && info.hasIndexCol) {
+          if (info != null && info.hasIndexCol) {
             numPrefix++;
             initCondition = info.remainderCondition;
             if (initCondition.isAlwaysTrue()) {
@@ -194,14 +202,14 @@ public class IndexConditionInfo {
       Map<IndexDescriptor, IndexConditionInfo> indexInfoMap = Maps.newLinkedHashMap();
       RexNode initCondition = condition;
       for (IndexDescriptor index : indexes) {
-        if(initCondition.isAlwaysTrue()) {
+        if (initCondition.isAlwaysTrue()) {
           break;
         }
-        if(!isConditionPrefix(index, initCondition)) {
+        if (!isConditionPrefix(index, initCondition)) {
           continue;
         }
         IndexConditionInfo info = indexConditionRelatedToFields(index.getIndexColumns(), initCondition);
-        if(info == null || info.hasIndexCol == false) {
+        if (info == null || info.hasIndexCol == false) {
           continue;
         }
         initCondition = info.remainderCondition;
@@ -235,7 +243,7 @@ public class IndexConditionInfo {
 
       List<RexNode> conjuncts = RelOptUtil.conjunctions(condition);
       List<RexNode> indexConjuncts = RelOptUtil.conjunctions(indexCondition);
-      for(RexNode indexConjunction: indexConjuncts) {
+      for (RexNode indexConjunction: indexConjuncts) {
         RexUtil.removeAll(conjuncts, indexConjunction);
       }
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexPlanUtils.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexPlanUtils.java
index cdad63a..c0758c7 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexPlanUtils.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexPlanUtils.java
@@ -103,8 +103,8 @@ public class IndexPlanUtils {
     GroupScan groupScan = scanRel.getGroupScan();
     if (groupScan instanceof DbGroupScan) {
       DbGroupScan dbscan = ((DbGroupScan) groupScan);
-      //if we already applied index convert rule, and this scan is indexScan or restricted scan already,
-      //no more trying index convert rule
+      // if we already applied index convert rule, and this scan is indexScan or restricted scan already,
+      // no more trying index convert rule
       return dbscan.supportsSecondaryIndex() && (!dbscan.isIndexScan()) && (!dbscan.isRestrictedScan());
     }
     return false;
@@ -115,8 +115,8 @@ public class IndexPlanUtils {
    * @return
    */
   static public boolean isCoveringIndex(IndexCallContext indexContext, FunctionalIndexInfo functionInfo) {
-    if(functionInfo.hasFunctional()) {
-      //need info from full query
+    if (functionInfo.hasFunctional()) {
+      // need info from full query
       return queryCoveredByIndex(indexContext, functionInfo);
     }
     DbGroupScan groupScan = (DbGroupScan) getGroupScan(indexContext.getScan());
@@ -140,12 +140,12 @@ public class IndexPlanUtils {
    */
   static private boolean queryCoveredByIndex(IndexCallContext indexContext,
                               FunctionalIndexInfo functionInfo) {
-    //for indexed functions, if relevant schemapaths are included in index(in indexed fields or non-indexed fields),
+    // for indexed functions, if relevant schemapaths are included in index(in indexed fields or non-indexed fields),
     // check covering based on the local information we have:
-    //   if references to schema paths in functional indexes disappear beyond capProject
+    // if references to schema paths in functional indexes disappear beyond capProject
 
     if (indexContext.getFilter() != null && indexContext.getUpperProject() == null) {
-      if( !isFullQuery(indexContext)) {
+      if (!isFullQuery(indexContext)) {
         return false;
       }
     }
@@ -160,7 +160,7 @@ public class IndexPlanUtils {
           LogicalExpression expr = RexToExpression.toDrill(parserContext, null, indexContext.getScan(), rex);
           exprs.add(expr);
         }
-        //now collect paths in filter since upperProject may drop some paths in filter
+        // now collect paths in filter since upperProject may drop some paths in filter
         IndexableExprMarker filterMarker = new IndexableExprMarker(indexContext.getScan());
         indexContext.getFilterCondition().accept(filterMarker);
         for (RexNode rex : filterMarker.getIndexableExpression().keySet()) {
@@ -168,7 +168,7 @@ public class IndexPlanUtils {
           exprs.add(expr);
         }
       } else {
-        //we have underneath project, so we have to do more to convert expressions
+        // we have underneath project, so we have to do more to convert expressions
         for (RexNode rex : indexContext.getUpperProject().getProjects()) {
           LogicalExpression expr = RexToExpression.toDrill(parserContext, indexContext.getLowerProject(), indexContext.getScan(), rex);
           exprs.add(expr);
@@ -194,22 +194,22 @@ public class IndexPlanUtils {
         exprs.add(expr);
       }
     }
-    else {//upperProject and lowerProject both are null, the only place to find columns being used in query is scan
+    else { // upperProject and lowerProject both are null, the only place to find columns being used in query is scan
       exprs.addAll(indexContext.getScanColumns());
     }
 
     Map<LogicalExpression, Set<SchemaPath>> exprPathMap = functionInfo.getPathsInFunctionExpr();
     PathInExpr exprSearch = new PathInExpr(exprPathMap);
 
-    for(LogicalExpression expr: exprs) {
-      if(expr.accept(exprSearch, null) == false) {
+    for (LogicalExpression expr: exprs) {
+      if (expr.accept(exprSearch, null) == false) {
         return false;
       }
     }
-    //if we come to here, paths in indexed function expressions are covered in capProject.
-    //now we check other paths.
+    // if we come to here, paths in indexed function expressions are covered in capProject.
+    // now we check other paths.
 
-    //check the leftout paths (appear in capProject other than functional index expression) are covered by other index fields or not
+    // check the leftout paths (appear in capProject other than functional index expression) are covered by other index fields or not
     List<LogicalExpression> leftPaths = Lists.newArrayList(exprSearch.getRemainderPaths());
 
     indexContext.setLeftOutPathsInFunctions(exprSearch.getRemainderPathsInFunctions());
@@ -218,7 +218,7 @@ public class IndexPlanUtils {
 
   static private boolean isFullQuery(IndexCallContext indexContext) {
     RelNode rootInCall = indexContext.getCall().rel(0);
-    //check if the tip of the operator stack we have is also the top of the whole query, if yes, return true
+    // check if the tip of the operator stack we have is also the top of the whole query, if yes, return true
     if (indexContext.getCall().getPlanner().getRoot() instanceof RelSubset) {
       final RelSubset rootSet = (RelSubset) indexContext.getCall().getPlanner().getRoot();
       if (rootSet.getRelList().contains(rootInCall)) {
@@ -241,13 +241,13 @@ public class IndexPlanUtils {
    * @return the output RelCollation
    */
   public static RelCollation buildCollationLowerProject(List<RexNode> projectRexs, RelNode input, FunctionalIndexInfo indexInfo) {
-    //if leading fields of index are here, add them to RelCollation
+    // if leading fields of index are here, add them to RelCollation
     List<RelFieldCollation> newFields = Lists.newArrayList();
     if (!indexInfo.hasFunctional()) {
       Map<LogicalExpression, Integer> projectExprs = Maps.newLinkedHashMap();
       DrillParseContext parserContext = new DrillParseContext(PrelUtil.getPlannerSettings(input.getCluster()));
       int idx=0;
-      for(RexNode rex : projectRexs) {
+      for (RexNode rex : projectRexs) {
         projectExprs.put(DrillOptiq.toDrill(parserContext, input, rex), idx);
         idx++;
       }
@@ -336,7 +336,7 @@ public class IndexPlanUtils {
   }
 
   public static int getIndexFromCollation(int refIndex, List<RelFieldCollation> inputFieldCollations) {
-    for (int i=0; i < inputFieldCollations.size(); i++) {
+    for (int i = 0; i < inputFieldCollations.size(); i++) {
       if (refIndex == inputFieldCollations.get(i).getFieldIndex()) {
         return i;
       }
@@ -385,14 +385,14 @@ public class IndexPlanUtils {
             indexContext.getUpperProject().getProjects().get(idx));
         indexContext.getSortExprs().add(expr);
       }
-      else {//one project is null now
+      else { // one project is null now
         oneProject = (indexContext.getUpperProject() != null)? indexContext.getUpperProject() : indexContext.getLowerProject();
-        if(oneProject != null) {
+        if (oneProject != null) {
           LogicalExpression expr = RexToExpression.toDrill(parserContext, null, indexContext.getScan(),
               getProjects(oneProject).get(idx));
           indexContext.getSortExprs().add(expr);
         }
-        else {//two projects are null
+        else { // two projects are null
           SchemaPath path;
           RelDataTypeField f = indexContext.getScan().getRowType().getFieldList().get(idx);
           String pathSeg = f.getName().replaceAll("`", "");
@@ -426,14 +426,14 @@ public class IndexPlanUtils {
                 indexContext.upperProject.getProjects().get(idx));
         indexContext.sortExprs.add(expr);
       }
-      else {//one project is null now
+      else { // one project is null now
         oneProject = (indexContext.upperProject != null)? indexContext.upperProject : indexContext.lowerProject;
-        if(oneProject != null) {
+        if (oneProject != null) {
           LogicalExpression expr = RexToExpression.toDrill(parserContext, null, indexContext.scan,
                   oneProject.getProjects().get(idx));
           indexContext.sortExprs.add(expr);
         }
-        else {//two projects are null
+        else { // two projects are null
           SchemaPath path;
           RelDataTypeField f = indexContext.scan.getRowType().getFieldList().get(idx);
           String pathSeg = f.getName().replaceAll("`", "");
@@ -452,8 +452,8 @@ public class IndexPlanUtils {
    * @return if there is filter and expr is only in equality condition of the filter, return true
    */
   private static boolean exprOnlyInEquality(LogicalExpression expr, IndexCallContext context) {
-    //if there is no filter, expr wont be in equality
-    if(context.getFilter() == null) {
+    // if there is no filter, expr won't be in equality
+    if (context.getFilter() == null) {
       return false;
     }
     final Set<LogicalExpression> onlyInEquality = context.getOrigMarker().getExpressionsOnlyInEquality();
@@ -496,7 +496,7 @@ public class IndexPlanUtils {
     Map<LogicalExpression, Integer> projectExprs = Maps.newLinkedHashMap();
     DrillParseContext parserContext = new DrillParseContext(PrelUtil.getPlannerSettings(input.getCluster()));
     int idx=0;
-    for(RexNode rex : projectRexs) {
+    for (RexNode rex : projectRexs) {
       LogicalExpression expr;
       expr = RexToExpression.toDrill(parserContext, project, input, rex);
       projectExprs.put(expr, idx);
@@ -546,7 +546,7 @@ public class IndexPlanUtils {
       if (!projectExprs.containsKey(expr)) {
         // leading indexed field is not projected
         // but it is only-in-equality field, -- we continue to next indexed field, but we don't generate collation for this field
-        if(exprOnlyInEquality(expr, context)) {
+        if (exprOnlyInEquality(expr, context)) {
           continue;
         }
         // else no more collation is needed to be generated, since we now have one leading field which is not in equality condition
@@ -566,7 +566,7 @@ public class IndexPlanUtils {
       RelFieldCollation.NullDirection nullsDir = indexDesc.getNullsOrderingDirection();
       RelFieldCollation.Direction dir = (idxCollation == null)?
           null : idxCollation.getFieldCollations().get(idxFieldCount).direction;
-      if ( dir == null) {
+      if (dir == null) {
         break;
       }
       newFields.add(new RelFieldCollation(projectExprs.get(expr), dir, nullsDir));
@@ -609,7 +609,7 @@ public class IndexPlanUtils {
           FieldReference ref = FieldReference.getWithQuotedRef(f1.getName());
           RelFieldCollation origCollation = collationMap.get(ref);
           if (origCollation != null) {
-            RelFieldCollation fc = new RelFieldCollation(j,//origCollation.getFieldIndex(),
+            RelFieldCollation fc = new RelFieldCollation(j,
                 origCollation.direction, origCollation.nullDirection);
             rsScanCollationMap.put(origCollation.getFieldIndex(), fc);
           }
@@ -703,18 +703,18 @@ public class IndexPlanUtils {
     }
 
     List<SchemaPath> newPaths = Lists.newArrayList(paths);
-    for (int i=0; i<paths.size(); ++i) {
+    for (int i = 0; i < paths.size(); ++i) {
       SchemaPath newPath = functionInfo.getNewPath(paths.get(i));
-      if(newPath == null) {
+      if (newPath == null) {
         continue;
       }
 
       addedPaths.add(newPath);
       // if this path only in indexed function, we are safe to replace it
-      if(pathOnlyInIndexedFunction(paths.get(i))) {
+      if (pathOnlyInIndexedFunction(paths.get(i))) {
         newPaths.set(i, newPath);
       }
-      else {// we should not replace this column, instead we add a new "$N" field.
+      else { // we should not replace this column, instead we add a new "$N" field.
         newPaths.add(newPath);
       }
     }
@@ -797,14 +797,14 @@ public class IndexPlanUtils {
                                   List<LogicalExpression> indexCols,
                                   IndexConditionInfo.Builder builder, RexNode condition) {
     boolean prefix = true;
-    int i=0;
+    int i = 0;
 
     RexNode initCondition = condition.isAlwaysTrue() ? null : condition;
     while (prefix && i < indexCols.size()) {
       LogicalExpression p = indexCols.get(i++);
       List<LogicalExpression> prefixCol = ImmutableList.of(p);
       IndexConditionInfo info = builder.indexConditionRelatedToFields(prefixCol, initCondition);
-      if(info != null && info.hasIndexCol) {
+      if (info != null && info.hasIndexCol) {
         // the col had a match with one of the conditions; save the information about
         // indexcol --> condition mapping
         leadingPrefixMap.put(p, info.indexCondition);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexSelector.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexSelector.java
index b7f2b78..d3a0b13 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexSelector.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/IndexSelector.java
@@ -157,7 +157,7 @@ public class IndexSelector  {
               indexContext.getScan(), functionInfo, indexContext);
     }
 
-    if ( (inputCollation != null) &&
+    if ((inputCollation != null) &&
          (inputCollation.satisfies(indexContext.getCollation()))) {
       return true;
     }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/SimpleRexRemap.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/SimpleRexRemap.java
index eba44b1..812c5f6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/SimpleRexRemap.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/SimpleRexRemap.java
@@ -86,31 +86,31 @@ public class SimpleRexRemap {
   public RexNode rewriteEqualOnCharToLike(RexNode expr,
                                           Map<RexNode, LogicalExpression> equalOnCastCharExprs) {
     Map<RexNode, RexNode> srcToReplace = Maps.newIdentityHashMap();
-    for(Map.Entry<RexNode, LogicalExpression> entry: equalOnCastCharExprs.entrySet()) {
+    for (Map.Entry<RexNode, LogicalExpression> entry: equalOnCastCharExprs.entrySet()) {
       RexNode equalOp = entry.getKey();
       LogicalExpression opInput = entry.getValue();
 
       final List<RexNode> operands = ((RexCall)equalOp).getOperands();
       RexLiteral newLiteral = null;
       RexNode input = null;
-      if(operands.size() == 2 ) {
+      if (operands.size() == 2) {
         RexLiteral oplit = null;
         if (operands.get(0) instanceof RexLiteral) {
           oplit = (RexLiteral) operands.get(0);
-          if(oplit.getTypeName() == SqlTypeName.CHAR) {
+          if (oplit.getTypeName() == SqlTypeName.CHAR) {
             newLiteral = builder.makeLiteral(((NlsString) oplit.getValue()).getValue() + "%");
             input = operands.get(1);
           }
         }
         else if (operands.get(1) instanceof RexLiteral) {
           oplit = (RexLiteral) operands.get(1);
-          if(oplit.getTypeName() == SqlTypeName.CHAR) {
+          if (oplit.getTypeName() == SqlTypeName.CHAR) {
             newLiteral = builder.makeLiteral(((NlsString) oplit.getValue()).getValue() + "%");
             input = operands.get(0);
           }
         }
       }
-      if(newLiteral != null) {
+      if (newLiteral != null) {
         srcToReplace.put(equalOp, builder.makeCall(SqlStdOperatorTable.LIKE, input, newLiteral));
       }
     }
@@ -130,16 +130,16 @@ public class SimpleRexRemap {
    */
   public RexNode rewriteWithMap(RexNode srcRex, Map<RexNode, LogicalExpression> mapRexToExpr) {
     Map<RexNode, RexNode> destNodeMap = Maps.newHashMap();
-    for(Map.Entry<RexNode, LogicalExpression> entry: mapRexToExpr.entrySet()) {
+    for (Map.Entry<RexNode, LogicalExpression> entry: mapRexToExpr.entrySet()) {
       LogicalExpression entryExpr = entry.getValue();
 
       LogicalExpression destExpr = destExprMap.get(entryExpr);
-      //then build rexNode from the path
+      // then build rexNode from the path
       RexNode destRex = buildRexForField(destExpr==null?entryExpr : destExpr, newRowType);
       destNodeMap.put(entry.getKey(), destRex);
     }
 
-    //Visit through the nodes, if destExprMap has an entry to provide substitute to replace a rexNode, replace the rexNode
+    // Visit through the nodes, if destExprMap has an entry to provide substitute to replace a rexNode, replace the rexNode
     RexReplace replacer = new RexReplace(destNodeMap);
     RexNode resultRex = srcRex.accept(replacer);
     return resultRex;
@@ -158,7 +158,7 @@ public class SimpleRexRemap {
 
   public static String getFullPath(PathSegment pathSeg) {
     PathSegment.NameSegment nameSeg = (PathSegment.NameSegment)pathSeg;
-    if(nameSeg.isLastPath()) {
+    if (nameSeg.isLastPath()) {
       return nameSeg.getPath();
     }
     return String.format("%s.%s",
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/rules/AbstractMatchFunction.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/rules/AbstractMatchFunction.java
index 0449c10..a6c1fe7 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/rules/AbstractMatchFunction.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/rules/AbstractMatchFunction.java
@@ -27,8 +27,8 @@ public abstract class AbstractMatchFunction<T> implements MatchFunction<T> {
     GroupScan groupScan = scanRel.getGroupScan();
     if (groupScan instanceof DbGroupScan) {
       DbGroupScan dbscan = ((DbGroupScan) groupScan);
-      //if we already applied index convert rule, and this scan is indexScan or restricted scan already,
-      //no more trying index convert rule
+      // if we already applied index convert rule, and this scan is indexScan or restricted scan already,
+      // no more trying index convert rule
       return dbscan.supportsSecondaryIndex() && (!dbscan.isIndexScan()) && (!dbscan.isRestrictedScan());
     }
     return false;
@@ -37,8 +37,8 @@ public abstract class AbstractMatchFunction<T> implements MatchFunction<T> {
   public boolean checkScan(GroupScan groupScan) {
     if (groupScan instanceof DbGroupScan) {
       DbGroupScan dbscan = ((DbGroupScan) groupScan);
-      //if we already applied index convert rule, and this scan is indexScan or restricted scan already,
-      //no more trying index convert rule
+      // if we already applied index convert rule, and this scan is indexScan or restricted scan already,
+      // no more trying index convert rule
       return dbscan.supportsSecondaryIndex() &&
              !dbscan.isRestrictedScan() &&
               (!dbscan.isFilterPushedDown() || dbscan.isIndexScan()) &&
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillMergeProjectRule.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillMergeProjectRule.java
index c8c0db3..3a3c409 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillMergeProjectRule.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillMergeProjectRule.java
@@ -174,7 +174,7 @@ public class DrillMergeProjectRule extends RelOptRule {
         RelOptUtil.pushPastProject(topProject.getProjects(), bottomProject);
 
     // replace the two projects with a combined projection
-    if(topProject instanceof DrillProjectRel) {
+    if (topProject instanceof DrillProjectRel) {
       RelNode newProjectRel = DrillRelFactories.DRILL_LOGICAL_PROJECT_FACTORY.createProject(
           bottomProject.getInput(), newProjects,
           topProject.getRowType().getFieldNames());