Posted to commits@hive.apache.org by mm...@apache.org on 2018/02/16 15:52:35 UTC

[21/32] hive git commit: HIVE-18622: Vectorization: IF Statements, Comparisons, and more do not handle NULLs correctly (Matt McCline, reviewed by Sergey Shelukhin, Deepak Jaiswal, Vihang Karajgaonkar)

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldTimestamp.java
index 54cb5d8..740a00c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFTimestampFieldTimestamp.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
+import java.util.Arrays;
 import java.util.Calendar;
 
 import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
@@ -95,27 +96,41 @@ public abstract class VectorUDFTimestampFieldTimestamp extends VectorExpression
       return;
     }
 
-    /* true for all algebraic UDFs with no state */
-    outV.isRepeating = inputColVec.isRepeating;
+    // We do not need to do a column reset since we are carefully changing the output.
+    outV.isRepeating = false;
 
     TimestampColumnVector timestampColVector = (TimestampColumnVector) inputColVec;
 
+    if (inputColVec.isRepeating) {
+      if (inputColVec.noNulls || !inputColVec.isNull[0]) {
+        outV.isNull[0] = false;
+        outV.vector[0] = getTimestampField(timestampColVector, 0);
+      } else {
+        outV.isNull[0] = true;
+        outV.noNulls = false;
+      }
+      outV.isRepeating = true;
+      return;
+    }
+
     if (inputColVec.noNulls) {
-      outV.noNulls = true;
       if (selectedInUse) {
         for(int j=0; j < n; j++) {
           int i = sel[j];
+          outV.isNull[i] = false;
           outV.vector[i] = getTimestampField(timestampColVector, i);
         }
       } else {
+        Arrays.fill(outV.isNull, 0, n, false);
         for(int i = 0; i < n; i++) {
           outV.vector[i] = getTimestampField(timestampColVector, i);
         }
       }
-    } else {
-      // Handle case with nulls. Don't do function if the value is null, to save time,
-      // because calling the function can be expensive.
+    } else /* there are nulls in the inputColVector */ {
+
+      // Carefully handle NULLs...
       outV.noNulls = false;
+
       if (selectedInUse) {
         for(int j=0; j < n; j++) {
           int i = sel[j];
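
The hunk above is representative of the expression-level fix: handle the repeating input
up front, treat isNull[0] as authoritative when noNulls is false, and keep the output
isNull entries in sync instead of blindly propagating isRepeating. A condensed sketch of
that pattern follows; it is not part of the patch, the class and method names are
illustrative only, and only the LongColumnVector fields (vector, isNull, noNulls,
isRepeating) are real Hive API.

  import java.util.Arrays;

  import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

  public class RepeatingNullSketch {

    // Copies input to output while keeping isNull/noNulls consistent,
    // the way the patched expression does for its timestamp field.
    static void evaluate(LongColumnVector in, LongColumnVector out,
        int n, boolean selectedInUse, int[] sel) {

      out.isRepeating = false;

      if (in.isRepeating) {
        // noNulls may be a stale false; isNull[0] decides for a repeating vector.
        if (in.noNulls || !in.isNull[0]) {
          out.isNull[0] = false;
          out.vector[0] = in.vector[0];
        } else {
          out.isNull[0] = true;
          out.noNulls = false;
        }
        out.isRepeating = true;
        return;
      }

      if (in.noNulls) {
        if (selectedInUse) {
          for (int j = 0; j < n; j++) {
            int i = sel[j];
            out.isNull[i] = false;
            out.vector[i] = in.vector[i];
          }
        } else {
          Arrays.fill(out.isNull, 0, n, false);
          for (int i = 0; i < n; i++) {
            out.vector[i] = in.vector[i];
          }
        }
      } else {
        // There are nulls in the input; never raise out.noNulls back to true.
        out.noNulls = false;
        if (selectedInUse) {
          for (int j = 0; j < n; j++) {
            int i = sel[j];
            out.isNull[i] = in.isNull[i];
            if (!in.isNull[i]) {
              out.vector[i] = in.vector[i];
            }
          }
        } else {
          for (int i = 0; i < n; i++) {
            out.isNull[i] = in.isNull[i];
            if (!in.isNull[i]) {
              out.vector[i] = in.vector[i];
            }
          }
        }
      }
    }
  }

The key change is the "noNulls || !isNull[0]" test: a repeating vector can legitimately
carry noNulls == false while its single value is still non-null, and the old noNulls-only
test silently dropped that value.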

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java
index 6ebd7d3..18bacc5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java
@@ -151,7 +151,7 @@ public class VectorUDAFBloomFilter extends VectorAggregateExpression {
     Aggregation myagg = (Aggregation) agg;
 
     if (inputColumn.isRepeating) {
-      if (inputColumn.noNulls) {
+      if (inputColumn.noNulls || !inputColumn.isNull[0]) {
         valueProcessor.processValue(myagg, inputColumn, 0);
       }
       return;
@@ -251,7 +251,11 @@ public class VectorUDAFBloomFilter extends VectorAggregateExpression {
       }
     } else {
       if (inputColumn.isRepeating) {
-        // All nulls, no-op for min/max
+        if (!inputColumn.isNull[0]) {
+          iterateNoNullsRepeatingWithAggregationSelection(
+              aggregationBufferSets, aggregateIndex,
+              inputColumn, batchSize);
+        }
       } else {
         if (batch.selectedInUse) {
           iterateHasNullsSelectionWithAggregationSelection(

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilterMerge.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilterMerge.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilterMerge.java
index 8f1375e..fe5e33a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilterMerge.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilterMerge.java
@@ -123,7 +123,7 @@ public class VectorUDAFBloomFilterMerge extends VectorAggregateExpression {
     Aggregation myagg = (Aggregation) agg;
 
     if (inputColumn.isRepeating) {
-      if (inputColumn.noNulls) {
+      if (inputColumn.noNulls || !inputColumn.isNull[0]) {
         processValue(myagg, inputColumn, 0);
       }
       return;
@@ -223,7 +223,11 @@ public class VectorUDAFBloomFilterMerge extends VectorAggregateExpression {
       }
     } else {
       if (inputColumn.isRepeating) {
-        // All nulls, no-op for min/max
+        if (!inputColumn.isNull[0]) {
+          iterateNoNullsRepeatingWithAggregationSelection(
+              aggregationBufferSets, aggregateIndex,
+              inputColumn, batchSize);
+        }
       } else {
         if (batch.selectedInUse) {
           iterateHasNullsSelectionWithAggregationSelection(

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
index 888f5f0..0463de5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
@@ -37,155 +37,155 @@ public class VectorUDAFCountMerge extends VectorAggregateExpression {
 
   private static final long serialVersionUID = 1L;
 
-    /**
-     * class for storing the current aggregate value.
-     */
-    static class Aggregation implements AggregationBuffer {
-
-      private static final long serialVersionUID = 1L;
+  /**
+   * class for storing the current aggregate value.
+   */
+  static class Aggregation implements AggregationBuffer {
 
-      transient private long value;
+    private static final long serialVersionUID = 1L;
 
-      @Override
-      public int getVariableSize() {
-        throw new UnsupportedOperationException();
-      }
+    private transient long value;
 
-      @Override
-      public void reset() {
-        value = 0L;
-      }
+    @Override
+    public int getVariableSize() {
+      throw new UnsupportedOperationException();
     }
 
-    // This constructor is used to momentarily create the object so match can be called.
-    public VectorUDAFCountMerge() {
-      super();
+    @Override
+    public void reset() {
+      value = 0L;
     }
+  }
 
-    public VectorUDAFCountMerge(VectorAggregationDesc vecAggrDesc) {
-      super(vecAggrDesc);
-      init();
-    }
+  // This constructor is used to momentarily create the object so match can be called.
+  public VectorUDAFCountMerge() {
+    super();
+  }
 
-    private void init() {
-    }
+  public VectorUDAFCountMerge(VectorAggregationDesc vecAggrDesc) {
+    super(vecAggrDesc);
+    init();
+  }
 
-    private Aggregation getCurrentAggregationBuffer(
-        VectorAggregationBufferRow[] aggregationBufferSets,
-        int aggregateIndex,
-        int row) {
-      VectorAggregationBufferRow mySet = aggregationBufferSets[row];
-      Aggregation myagg = (Aggregation) mySet.getAggregationBuffer(aggregateIndex);
-      return myagg;
-    }
+  private void init() {
+  }
 
-    @Override
-    public void aggregateInputSelection(
+  private Aggregation getCurrentAggregationBuffer(
+      VectorAggregationBufferRow[] aggregationBufferSets,
+      int aggregateIndex,
+      int row) {
+    VectorAggregationBufferRow mySet = aggregationBufferSets[row];
+    Aggregation myagg = (Aggregation) mySet.getAggregationBuffer(aggregateIndex);
+    return myagg;
+  }
+
+  @Override
+  public void aggregateInputSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregateIndex,
       VectorizedRowBatch batch) throws HiveException {
 
-      int batchSize = batch.size;
+    int batchSize = batch.size;
 
-      if (batchSize == 0) {
-        return;
-      }
+    if (batchSize == 0) {
+      return;
+    }
 
-      inputExpression.evaluate(batch);
+    inputExpression.evaluate(batch);
 
-      LongColumnVector inputVector =
-          (LongColumnVector) batch.cols[
-              this.inputExpression.getOutputColumnNum()];
+    LongColumnVector inputVector =
+        (LongColumnVector) batch.cols[
+            this.inputExpression.getOutputColumnNum()];
 
-      long[] vector = inputVector.vector;
+    long[] vector = inputVector.vector;
 
-      if (inputVector.noNulls) {
-        if (inputVector.isRepeating) {
-          iterateNoNullsRepeatingWithAggregationSelection(
+    if (inputVector.noNulls) {
+      if (inputVector.isRepeating) {
+        iterateNoNullsRepeatingWithAggregationSelection(
             aggregationBufferSets, aggregateIndex,
             vector[0], batchSize);
-        } else {
-          if (batch.selectedInUse) {
-            iterateNoNullsSelectionWithAggregationSelection(
+      } else {
+        if (batch.selectedInUse) {
+          iterateNoNullsSelectionWithAggregationSelection(
               aggregationBufferSets, aggregateIndex,
               vector, batch.selected, batchSize);
-          } else {
-            iterateNoNullsWithAggregationSelection(
+        } else {
+          iterateNoNullsWithAggregationSelection(
               aggregationBufferSets, aggregateIndex,
               vector, batchSize);
-          }
         }
-      } else {
-        if (inputVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
+      }
+    } else {
+      if (inputVector.isRepeating) {
+        if (batch.selectedInUse) {
+          iterateHasNullsRepeatingSelectionWithAggregationSelection(
               aggregationBufferSets, aggregateIndex,
               vector[0], batchSize, batch.selected, inputVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
+        } else {
+          iterateHasNullsRepeatingWithAggregationSelection(
               aggregationBufferSets, aggregateIndex,
               vector[0], batchSize, inputVector.isNull);
-          }
-        } else {
-          if (batch.selectedInUse) {
-            iterateHasNullsSelectionWithAggregationSelection(
+        }
+      } else {
+        if (batch.selectedInUse) {
+          iterateHasNullsSelectionWithAggregationSelection(
               aggregationBufferSets, aggregateIndex,
               vector, batchSize, batch.selected, inputVector.isNull);
-          } else {
-            iterateHasNullsWithAggregationSelection(
+        } else {
+          iterateHasNullsWithAggregationSelection(
               aggregationBufferSets, aggregateIndex,
               vector, batchSize, inputVector.isNull);
-          }
         }
       }
     }
+  }
 
-    private void iterateNoNullsRepeatingWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int aggregateIndex,
-      long value,
-      int batchSize) {
+  private void iterateNoNullsRepeatingWithAggregationSelection(
+    VectorAggregationBufferRow[] aggregationBufferSets,
+    int aggregateIndex,
+    long value,
+    int batchSize) {
 
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
+    for (int i=0; i < batchSize; ++i) {
+      Aggregation myagg = getCurrentAggregationBuffer(
           aggregationBufferSets, 
           aggregateIndex,
           i);
-        myagg.value += value;
-      }
-    } 
-
-    private void iterateNoNullsSelectionWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int aggregateIndex,
-      long[] values,
-      int[] selection,
-      int batchSize) {
-      
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
+      myagg.value += value;
+    }
+  } 
+
+  private void iterateNoNullsSelectionWithAggregationSelection(
+    VectorAggregationBufferRow[] aggregationBufferSets,
+    int aggregateIndex,
+    long[] values,
+    int[] selection,
+    int batchSize) {
+    
+    for (int i=0; i < batchSize; ++i) {
+      Aggregation myagg = getCurrentAggregationBuffer(
           aggregationBufferSets, 
           aggregateIndex,
           i);
-        myagg.value += values[selection[i]];
-      }
+      myagg.value += values[selection[i]];
     }
+  }
 
-    private void iterateNoNullsWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int aggregateIndex,
-      long[] values,
-      int batchSize) {
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
+  private void iterateNoNullsWithAggregationSelection(
+    VectorAggregationBufferRow[] aggregationBufferSets,
+    int aggregateIndex,
+    long[] values,
+    int batchSize) {
+    for (int i=0; i < batchSize; ++i) {
+      Aggregation myagg = getCurrentAggregationBuffer(
           aggregationBufferSets, 
           aggregateIndex,
           i);
-        myagg.value += values[i];
-      }
+      myagg.value += values[i];
     }
+  }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
+  private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregateIndex,
       long value,
@@ -193,41 +193,41 @@ public class VectorUDAFCountMerge extends VectorAggregateExpression {
       int[] selection,
       boolean[] isNull) {
 
-      if (isNull[0]) {
-        return;
-      }
-      
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
+    if (isNull[0]) {
+      return;
+    }
+
+    for (int i=0; i < batchSize; ++i) {
+      Aggregation myagg = getCurrentAggregationBuffer(
           aggregationBufferSets,
           aggregateIndex,
           i);
-        myagg.value += value;
-      }
-      
+      myagg.value += value;
     }
+    
+  }
 
-    private void iterateHasNullsRepeatingWithAggregationSelection(
+  private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregateIndex,
       long value,
       int batchSize,
       boolean[] isNull) {
 
-      if (isNull[0]) {
-        return;
-      }
+    if (isNull[0]) {
+      return;
+    }
 
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
+    for (int i=0; i < batchSize; ++i) {
+      Aggregation myagg = getCurrentAggregationBuffer(
           aggregationBufferSets,
           aggregateIndex,
           i);
-        myagg.value += value;
-      }
+      myagg.value += value;
     }
+  }
 
-    private void iterateHasNullsSelectionWithAggregationSelection(
+  private void iterateHasNullsSelectionWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregateIndex,
       long[] values,
@@ -235,146 +235,146 @@ public class VectorUDAFCountMerge extends VectorAggregateExpression {
       int[] selection,
       boolean[] isNull) {
 
-      for (int j=0; j < batchSize; ++j) {
-        int i = selection[j];
-        if (!isNull[i]) {
-          Aggregation myagg = getCurrentAggregationBuffer(
+    for (int j=0; j < batchSize; ++j) {
+      int i = selection[j];
+      if (!isNull[i]) {
+        Aggregation myagg = getCurrentAggregationBuffer(
             aggregationBufferSets, 
             aggregateIndex,
             j);
-          myagg.value += values[i];
-        }
+        myagg.value += values[i];
       }
-   }
+    }
+  }
 
-    private void iterateHasNullsWithAggregationSelection(
+  private void iterateHasNullsWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregateIndex,
       long[] values,
       int batchSize,
       boolean[] isNull) {
 
-      for (int i=0; i < batchSize; ++i) {
-        if (!isNull[i]) {
-          Aggregation myagg = getCurrentAggregationBuffer(
+    for (int i=0; i < batchSize; ++i) {
+      if (!isNull[i]) {
+        Aggregation myagg = getCurrentAggregationBuffer(
             aggregationBufferSets, 
             aggregateIndex,
             i);
-          myagg.value += values[i];
-        }
+        myagg.value += values[i];
       }
-   }
+    }
+  }
 
-    @Override
-    public void aggregateInput(AggregationBuffer agg, VectorizedRowBatch batch)
-    throws HiveException {
+  @Override
+  public void aggregateInput(AggregationBuffer agg, VectorizedRowBatch batch)
+      throws HiveException {
 
-      inputExpression.evaluate(batch);
+    inputExpression.evaluate(batch);
 
-      LongColumnVector inputVector =
-          (LongColumnVector) batch.cols[
-              this.inputExpression.getOutputColumnNum()];
+    LongColumnVector inputVector =
+        (LongColumnVector) batch.cols[
+            this.inputExpression.getOutputColumnNum()];
 
-      int batchSize = batch.size;
+    int batchSize = batch.size;
 
-      if (batchSize == 0) {
-        return;
-      }
+    if (batchSize == 0) {
+      return;
+    }
 
-      Aggregation myagg = (Aggregation)agg;
+    Aggregation myagg = (Aggregation)agg;
 
-      long[] vector = inputVector.vector;
-      
-      if (inputVector.isRepeating) {
-        if (inputVector.noNulls) {
-          myagg.value += vector[0]*batchSize;
-        }
-        return;
-      }
+    long[] vector = inputVector.vector;
 
-      if (!batch.selectedInUse && inputVector.noNulls) {
-        iterateNoSelectionNoNulls(myagg, vector, batchSize);
-      }
-      else if (!batch.selectedInUse) {
-        iterateNoSelectionHasNulls(myagg, vector, batchSize, inputVector.isNull);
-      }
-      else if (inputVector.noNulls){
-        iterateSelectionNoNulls(myagg, vector, batchSize, batch.selected);
-      }
-      else {
-        iterateSelectionHasNulls(myagg, vector, batchSize, inputVector.isNull, batch.selected);
+    if (inputVector.isRepeating) {
+      if (inputVector.noNulls || !inputVector.isNull[0]) {
+        myagg.value += vector[0]*batchSize;
       }
+      return;
     }
-  
-    private void iterateSelectionHasNulls(
-        Aggregation myagg, 
-        long[] vector, 
-        int batchSize,
-        boolean[] isNull, 
-        int[] selected) {
-
-      for (int j=0; j< batchSize; ++j) {
-        int i = selected[j];
-        if (!isNull[i]) {
-          myagg.value += vector[i];
-        }
-      }
+
+    if (!batch.selectedInUse && inputVector.noNulls) {
+      iterateNoSelectionNoNulls(myagg, vector, batchSize);
+    }
+    else if (!batch.selectedInUse) {
+      iterateNoSelectionHasNulls(myagg, vector, batchSize, inputVector.isNull);
+    }
+    else if (inputVector.noNulls){
+      iterateSelectionNoNulls(myagg, vector, batchSize, batch.selected);
+    }
+    else {
+      iterateSelectionHasNulls(myagg, vector, batchSize, inputVector.isNull, batch.selected);
     }
+  }
 
-    private void iterateSelectionNoNulls(
-        Aggregation myagg, 
-        long[] vector, 
-        int batchSize, 
-        int[] selected) {
+  private void iterateSelectionHasNulls(
+      Aggregation myagg, 
+      long[] vector, 
+      int batchSize,
+      boolean[] isNull, 
+      int[] selected) {
 
-      for (int i=0; i< batchSize; ++i) {
-        myagg.value += vector[selected[i]];
+    for (int j=0; j< batchSize; ++j) {
+      int i = selected[j];
+      if (!isNull[i]) {
+        myagg.value += vector[i];
       }
     }
+  }
 
-    private void iterateNoSelectionHasNulls(
-        Aggregation myagg, 
-        long[] vector, 
-        int batchSize,
-        boolean[] isNull) {
-      
-      for(int i=0;i<batchSize;++i) {
-        if (!isNull[i]) {
-          myagg.value += vector[i];
-        }
-      }
+  private void iterateSelectionNoNulls(
+      Aggregation myagg, 
+      long[] vector, 
+      int batchSize, 
+      int[] selected) {
+
+    for (int i=0; i< batchSize; ++i) {
+      myagg.value += vector[selected[i]];
     }
+  }
 
-    private void iterateNoSelectionNoNulls(
-        Aggregation myagg, 
-        long[] vector, 
-        int batchSize) {
+  private void iterateNoSelectionHasNulls(
+      Aggregation myagg, 
+      long[] vector, 
+      int batchSize,
+      boolean[] isNull) {
 
-      for (int i=0;i<batchSize;++i) {
+    for(int i=0;i<batchSize;++i) {
+      if (!isNull[i]) {
         myagg.value += vector[i];
       }
     }
+  }
 
-    @Override
-    public AggregationBuffer getNewAggregationBuffer() throws HiveException {
-      return new Aggregation();
-    }
+  private void iterateNoSelectionNoNulls(
+      Aggregation myagg, 
+      long[] vector, 
+      int batchSize) {
 
-    @Override
-    public void reset(AggregationBuffer agg) throws HiveException {
-      Aggregation myAgg = (Aggregation) agg;
-      myAgg.reset();
+    for (int i=0;i<batchSize;++i) {
+      myagg.value += vector[i];
     }
+  }
 
-    @Override
-    public long getAggregationBufferFixedSize() {
-      JavaDataModel model = JavaDataModel.get();
-      return JavaDataModel.alignUp(
+  @Override
+  public AggregationBuffer getNewAggregationBuffer() throws HiveException {
+    return new Aggregation();
+  }
+
+  @Override
+  public void reset(AggregationBuffer agg) throws HiveException {
+    Aggregation myAgg = (Aggregation) agg;
+    myAgg.reset();
+  }
+
+  @Override
+  public long getAggregationBufferFixedSize() {
+    JavaDataModel model = JavaDataModel.get();
+    return JavaDataModel.alignUp(
         model.object() +
         model.primitive2() +
         model.primitive1(),
         model.memoryAlign());
-    }
+  }
 
   @Override
   public boolean matches(String name, ColumnVector.Type inputColVectorType,
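
Beyond the re-indentation, the substantive change in VectorUDAFCountMerge (and in the
surrounding aggregate classes) is the same repeated-value test in the isRepeating branch
of aggregateInput. A minimal, self-contained sketch of why the stricter check matters;
the class is illustrative, only the LongColumnVector fields are real Hive API.

  import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

  public class RepeatingSumSketch {

    // Sums batchSize logical rows from a possibly repeating column.
    static long sum(LongColumnVector col, int batchSize) {
      if (col.isRepeating) {
        // The repeated value can be non-null even when noNulls == false.
        if (col.noNulls || !col.isNull[0]) {
          return col.vector[0] * (long) batchSize;
        }
        return 0L;
      }
      long total = 0L;
      for (int i = 0; i < batchSize; i++) {
        if (col.noNulls || !col.isNull[i]) {
          total += col.vector[i];
        }
      }
      return total;
    }

    public static void main(String[] args) {
      LongColumnVector col = new LongColumnVector();
      col.isRepeating = true;
      col.noNulls = false;     // conservative flag ...
      col.isNull[0] = false;   // ... but the single repeated value is not null
      col.vector[0] = 5;
      System.out.println(sum(col, 3));  // 15; the old noNulls-only guard yields 0
    }
  }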

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64.java
index 251de3a..a503445 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64.java
@@ -333,7 +333,7 @@ public class VectorUDAFSumDecimal64 extends VectorAggregateExpression {
     long[] vector = inputVector.vector;
 
     if (inputVector.isRepeating) {
-      if (inputVector.noNulls) {
+      if (inputVector.noNulls || !inputVector.isNull[0]) {
       if (myagg.isNull) {
         myagg.isNull = false;
         myagg.sum = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64ToDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64ToDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64ToDecimal.java
index 06e319b..d091f3f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64ToDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64ToDecimal.java
@@ -358,13 +358,13 @@ public class VectorUDAFSumDecimal64ToDecimal extends VectorAggregateExpression {
     long[] vector = inputVector.vector;
 
     if (inputVector.isRepeating) {
-      if (inputVector.noNulls) {
-      if (myagg.isNull) {
-        myagg.isNull = false;
-        myagg.sum = 0;
+      if (inputVector.noNulls || !inputVector.isNull[0]) {
+        if (myagg.isNull) {
+          myagg.isNull = false;
+          myagg.sum = 0;
+        }
+        myagg.sumValueNoCheck(vector[0]*batchSize);
       }
-      myagg.sumValueNoCheck(vector[0]*batchSize);
-    }
       return;
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumTimestamp.java
index 3ca5ee0..e542033 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumTimestamp.java
@@ -297,13 +297,13 @@ public class VectorUDAFSumTimestamp extends VectorAggregateExpression {
       Aggregation myagg = (Aggregation)agg;
 
       if (inputVector.isRepeating) {
-        if (inputVector.noNulls) {
-        if (myagg.isNull) {
-          myagg.isNull = false;
-          myagg.sum = 0;
+        if (inputVector.noNulls || !inputVector.isNull[0]) {
+          if (myagg.isNull) {
+            myagg.isNull = false;
+            myagg.sum = 0;
+          }
+          myagg.sum += inputVector.getDouble(0) * batchSize;
         }
-        myagg.sum += inputVector.getDouble(0) * batchSize;
-      }
         return;
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorCount.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorCount.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorCount.java
index 7166c64..9515832 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorCount.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorCount.java
@@ -62,7 +62,7 @@ public class VectorPTFEvaluatorCount extends VectorPTFEvaluatorBase {
     }
     ColumnVector colVector = batch.cols[inputColumnNum];
     if (colVector.isRepeating) {
-      if (colVector.noNulls) {
+      if (colVector.noNulls || !colVector.isNull[0]) {
         count += size;
       }
     } else if (colVector.noNulls) {

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalAvg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalAvg.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalAvg.java
index 85e5ebe..bd4896a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalAvg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalAvg.java
@@ -72,7 +72,7 @@ public class VectorPTFEvaluatorDecimalAvg extends VectorPTFEvaluatorBase {
     DecimalColumnVector decimalColVector = ((DecimalColumnVector) batch.cols[inputColumnNum]);
     if (decimalColVector.isRepeating) {
 
-      if (decimalColVector.noNulls) {
+      if (decimalColVector.noNulls || !decimalColVector.isNull[0]) {
 
         // We have a repeated value.  The sum increases by value * batch.size.
         temp.setFromLong(batch.size);

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalFirstValue.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalFirstValue.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalFirstValue.java
index ed11a09..daba90c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalFirstValue.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalFirstValue.java
@@ -70,7 +70,8 @@ public class VectorPTFEvaluatorDecimalFirstValue extends VectorPTFEvaluatorBase
       }
       DecimalColumnVector decimalColVector = ((DecimalColumnVector) batch.cols[inputColumnNum]);
       if (decimalColVector.isRepeating) {
-        if (decimalColVector.noNulls) {
+
+        if (decimalColVector.noNulls || !decimalColVector.isNull[0]) {
           firstValue.set(decimalColVector.vector[0]);
           isGroupResultNull = false;
         }
@@ -86,6 +87,10 @@ public class VectorPTFEvaluatorDecimalFirstValue extends VectorPTFEvaluatorBase
       haveFirstValue = true;
     }
 
+    /*
+     * Do careful maintenance of the outputColVector.noNulls flag.
+     */
+
     // First value is repeated for all batches.
     DecimalColumnVector outputColVector = (DecimalColumnVector) batch.cols[outputColumnNum];
     outputColVector.isRepeating = true;
@@ -93,7 +98,6 @@ public class VectorPTFEvaluatorDecimalFirstValue extends VectorPTFEvaluatorBase
       outputColVector.noNulls = false;
       outputColVector.isNull[0] = true;
     } else {
-      outputColVector.noNulls = true;
       outputColVector.isNull[0] = false;
       outputColVector.vector[0].set(firstValue);
     }
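
The PTF evaluator hunks drop the unconditional "outputColVector.noNulls = true;". One way
to read the "careful maintenance" comments: noNulls == true asserts that every isNull
entry is false, so it should only be raised when that is actually known, while writing a
repeated group result only needs isRepeating plus isNull[0]. A hedged sketch of the
resulting write pattern (illustrative method, real LongColumnVector fields):

  import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

  public class GroupResultSketch {

    // Writes one repeated group result into an output column. noNulls is only
    // ever lowered; raising it back to true is safe only when every isNull
    // entry is known to be false, which this writer does not track.
    static void writeGroupResult(LongColumnVector out, boolean isGroupResultNull, long result) {
      out.isRepeating = true;
      if (isGroupResultNull) {
        out.noNulls = false;
        out.isNull[0] = true;
      } else {
        out.isNull[0] = false;
        out.vector[0] = result;
      }
    }
  }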

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalLastValue.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalLastValue.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalLastValue.java
index eb55792..b2cbdf6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalLastValue.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalLastValue.java
@@ -70,7 +70,8 @@ public class VectorPTFEvaluatorDecimalLastValue extends VectorPTFEvaluatorBase {
     }
     DecimalColumnVector decimalColVector = ((DecimalColumnVector) batch.cols[inputColumnNum]);
     if (decimalColVector.isRepeating) {
-      if (decimalColVector.noNulls) {
+
+      if (decimalColVector.noNulls || !decimalColVector.isNull[0]) {
         lastValue.set(decimalColVector.vector[0]);
         isGroupResultNull = false;
       } else {

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalMax.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalMax.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalMax.java
index 231cf9b..4d8c7fe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalMax.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalMax.java
@@ -64,7 +64,8 @@ public class VectorPTFEvaluatorDecimalMax extends VectorPTFEvaluatorBase {
     }
     DecimalColumnVector decimalColVector = ((DecimalColumnVector) batch.cols[inputColumnNum]);
     if (decimalColVector.isRepeating) {
-      if (decimalColVector.noNulls) {
+
+      if (decimalColVector.noNulls || !decimalColVector.isNull[0]) {
         if (isGroupResultNull) {
           max.set(decimalColVector.vector[0]);
           isGroupResultNull = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalMin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalMin.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalMin.java
index 6373c09..312c43c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalMin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalMin.java
@@ -64,7 +64,8 @@ public class VectorPTFEvaluatorDecimalMin extends VectorPTFEvaluatorBase {
     }
     DecimalColumnVector decimalColVector = ((DecimalColumnVector) batch.cols[inputColumnNum]);
     if (decimalColVector.isRepeating) {
-      if (decimalColVector.noNulls) {
+
+      if (decimalColVector.noNulls || !decimalColVector.isNull[0]) {
         if (isGroupResultNull) {
           min.set(decimalColVector.vector[0]);
           isGroupResultNull = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalSum.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalSum.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalSum.java
index 36dd119..e899c36 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalSum.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDecimalSum.java
@@ -66,7 +66,7 @@ public class VectorPTFEvaluatorDecimalSum extends VectorPTFEvaluatorBase {
     DecimalColumnVector decimalColVector = ((DecimalColumnVector) batch.cols[inputColumnNum]);
     if (decimalColVector.isRepeating) {
 
-      if (decimalColVector.noNulls) {
+      if (decimalColVector.noNulls || !decimalColVector.isNull[0]) {
         temp.setFromLong(batch.size);
         if (isGroupResultNull) {
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDenseRank.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDenseRank.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDenseRank.java
index 9ceeb13..e457e32 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDenseRank.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDenseRank.java
@@ -51,7 +51,6 @@ public class VectorPTFEvaluatorDenseRank extends VectorPTFEvaluatorBase {
 
     LongColumnVector longColVector = (LongColumnVector) batch.cols[outputColumnNum];
     longColVector.isRepeating = true;
-    longColVector.noNulls = true;
     longColVector.isNull[0] = false;
     longColVector.vector[0] = denseRank;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleAvg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleAvg.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleAvg.java
index 271a936..298de2d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleAvg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleAvg.java
@@ -66,7 +66,7 @@ public class VectorPTFEvaluatorDoubleAvg extends VectorPTFEvaluatorBase {
     DoubleColumnVector doubleColVector = ((DoubleColumnVector) batch.cols[inputColumnNum]);
     if (doubleColVector.isRepeating) {
 
-      if (doubleColVector.noNulls) {
+      if (doubleColVector.noNulls || !doubleColVector.isNull[0]) {
 
         // We have a repeated value.  The sum increases by value * batch.size.
         if (isGroupResultNull) {

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleFirstValue.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleFirstValue.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleFirstValue.java
index 9f65de4..460fbe5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleFirstValue.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleFirstValue.java
@@ -66,7 +66,8 @@ public class VectorPTFEvaluatorDoubleFirstValue extends VectorPTFEvaluatorBase {
       }
       DoubleColumnVector doubleColVector = ((DoubleColumnVector) batch.cols[inputColumnNum]);
       if (doubleColVector.isRepeating) {
-        if (doubleColVector.noNulls) {
+
+        if (doubleColVector.noNulls || !doubleColVector.isNull[0]) {
           firstValue = doubleColVector.vector[0];
           isGroupResultNull = false;
         }
@@ -82,6 +83,10 @@ public class VectorPTFEvaluatorDoubleFirstValue extends VectorPTFEvaluatorBase {
       haveFirstValue = true;
     }
 
+    /*
+     * Do careful maintenance of the outputColVector.noNulls flag.
+     */
+
     // First value is repeated for all batches.
     DoubleColumnVector outputColVector = (DoubleColumnVector) batch.cols[outputColumnNum];
     outputColVector.isRepeating = true;
@@ -89,7 +94,6 @@ public class VectorPTFEvaluatorDoubleFirstValue extends VectorPTFEvaluatorBase {
       outputColVector.noNulls = false;
       outputColVector.isNull[0] = true;
     } else {
-      outputColVector.noNulls = true;
       outputColVector.isNull[0] = false;
       outputColVector.vector[0] = firstValue;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleLastValue.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleLastValue.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleLastValue.java
index 8d28994..51d06a8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleLastValue.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleLastValue.java
@@ -66,7 +66,8 @@ public class VectorPTFEvaluatorDoubleLastValue extends VectorPTFEvaluatorBase {
     }
     DoubleColumnVector doubleColVector = ((DoubleColumnVector) batch.cols[inputColumnNum]);
     if (doubleColVector.isRepeating) {
-      if (doubleColVector.noNulls) {
+
+      if (doubleColVector.noNulls || !doubleColVector.isNull[0]) {
         lastValue = doubleColVector.vector[0];
         isGroupResultNull = false;
       } else {

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleMax.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleMax.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleMax.java
index 732369a..2bc4c6b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleMax.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleMax.java
@@ -60,7 +60,8 @@ public class VectorPTFEvaluatorDoubleMax extends VectorPTFEvaluatorBase {
     }
     DoubleColumnVector doubleColVector = ((DoubleColumnVector) batch.cols[inputColumnNum]);
     if (doubleColVector.isRepeating) {
-      if (doubleColVector.noNulls) {
+
+      if (doubleColVector.noNulls || !doubleColVector.isNull[0]) {
         if (isGroupResultNull) {
           max = doubleColVector.vector[0];
           isGroupResultNull = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleMin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleMin.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleMin.java
index 91c538d..6a422b2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleMin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleMin.java
@@ -60,7 +60,8 @@ public class VectorPTFEvaluatorDoubleMin extends VectorPTFEvaluatorBase {
     }
     DoubleColumnVector doubleColVector = ((DoubleColumnVector) batch.cols[inputColumnNum]);
     if (doubleColVector.isRepeating) {
-      if (doubleColVector.noNulls) {
+
+      if (doubleColVector.noNulls || !doubleColVector.isNull[0]) {
         if (isGroupResultNull) {
           min = doubleColVector.vector[0];
           isGroupResultNull = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleSum.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleSum.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleSum.java
index 0c534d8..2ecc4ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleSum.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorDoubleSum.java
@@ -61,7 +61,7 @@ public class VectorPTFEvaluatorDoubleSum extends VectorPTFEvaluatorBase {
     DoubleColumnVector doubleColVector = ((DoubleColumnVector) batch.cols[inputColumnNum]);
     if (doubleColVector.isRepeating) {
 
-      if (doubleColVector.noNulls) {
+      if (doubleColVector.noNulls || !doubleColVector.isNull[0]) {
         if (isGroupResultNull) {
 
           // First aggregation calculation for group.

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongAvg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongAvg.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongAvg.java
index 66e8f98..f48df25 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongAvg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongAvg.java
@@ -66,7 +66,7 @@ public class VectorPTFEvaluatorLongAvg extends VectorPTFEvaluatorBase {
     LongColumnVector longColVector = ((LongColumnVector) batch.cols[inputColumnNum]);
     if (longColVector.isRepeating) {
 
-      if (longColVector.noNulls) {
+      if (longColVector.noNulls || !longColVector.isNull[0]) {
 
         // We have a repeated value.  The sum increases by value * batch.size.
         if (isGroupResultNull) {

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongFirstValue.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongFirstValue.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongFirstValue.java
index 5151ecb..3deadb1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongFirstValue.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongFirstValue.java
@@ -66,7 +66,8 @@ public class VectorPTFEvaluatorLongFirstValue extends VectorPTFEvaluatorBase {
       }
       LongColumnVector longColVector = ((LongColumnVector) batch.cols[inputColumnNum]);
       if (longColVector.isRepeating) {
-        if (longColVector.noNulls) {
+
+        if (longColVector.noNulls || !longColVector.isNull[0]) {
           firstValue = longColVector.vector[0];
           isGroupResultNull = false;
         }
@@ -82,6 +83,10 @@ public class VectorPTFEvaluatorLongFirstValue extends VectorPTFEvaluatorBase {
       haveFirstValue = true;
     }
 
+    /*
+     * Do careful maintenance of the outputColVector.noNulls flag.
+     */
+
     // First value is repeated for all batches.
     LongColumnVector outputColVector = (LongColumnVector) batch.cols[outputColumnNum];
     outputColVector.isRepeating = true;
@@ -89,7 +94,6 @@ public class VectorPTFEvaluatorLongFirstValue extends VectorPTFEvaluatorBase {
       outputColVector.noNulls = false;
       outputColVector.isNull[0] = true;
     } else {
-      outputColVector.noNulls = true;
       outputColVector.isNull[0] = false;
       outputColVector.vector[0] = firstValue;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongLastValue.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongLastValue.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongLastValue.java
index fa8e880..0a5df51 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongLastValue.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongLastValue.java
@@ -66,7 +66,8 @@ public class VectorPTFEvaluatorLongLastValue extends VectorPTFEvaluatorBase {
     }
     LongColumnVector longColVector = ((LongColumnVector) batch.cols[inputColumnNum]);
     if (longColVector.isRepeating) {
-      if (longColVector.noNulls) {
+
+      if (longColVector.noNulls || !longColVector.isNull[0]) {
         lastValue = longColVector.vector[0];
         isGroupResultNull = false;
       } else {

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongMax.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongMax.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongMax.java
index b60b03b..0e7eb07 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongMax.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongMax.java
@@ -60,7 +60,8 @@ public class VectorPTFEvaluatorLongMax extends VectorPTFEvaluatorBase {
     }
     LongColumnVector longColVector = ((LongColumnVector) batch.cols[inputColumnNum]);
     if (longColVector.isRepeating) {
-      if (longColVector.noNulls) {
+
+      if (longColVector.noNulls || !longColVector.isNull[0]) {
         if (isGroupResultNull) {
           max = longColVector.vector[0];
           isGroupResultNull = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongMin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongMin.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongMin.java
index 26ea0df..df31c51 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongMin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongMin.java
@@ -60,7 +60,8 @@ public class VectorPTFEvaluatorLongMin extends VectorPTFEvaluatorBase {
     }
     LongColumnVector longColVector = ((LongColumnVector) batch.cols[inputColumnNum]);
     if (longColVector.isRepeating) {
-      if (longColVector.noNulls) {
+
+      if (longColVector.noNulls || !longColVector.isNull[0]) {
         if (isGroupResultNull) {
           min = longColVector.vector[0];
           isGroupResultNull = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongSum.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongSum.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongSum.java
index ce0acb5..9402218 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongSum.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorLongSum.java
@@ -61,7 +61,7 @@ public class VectorPTFEvaluatorLongSum extends VectorPTFEvaluatorBase {
     LongColumnVector longColVector = ((LongColumnVector) batch.cols[inputColumnNum]);
     if (longColVector.isRepeating) {
 
-      if (longColVector.noNulls) {
+      if (longColVector.noNulls || !longColVector.isNull[0]) {
         if (isGroupResultNull) {
 
           // First aggregation calculation for group.

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorRank.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorRank.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorRank.java
index f7080e5..34add61 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorRank.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFEvaluatorRank.java
@@ -50,9 +50,12 @@ public class VectorPTFEvaluatorRank extends VectorPTFEvaluatorBase {
   public void evaluateGroupBatch(VectorizedRowBatch batch, boolean isLastGroupBatch) {
     evaluateInputExpr(batch);
 
+    /*
+     * Do careful maintenance of the outputColVector.noNulls flag.
+     */
+
     LongColumnVector longColVector = (LongColumnVector) batch.cols[outputColumnNum];
     longColVector.isRepeating = true;
-    longColVector.noNulls = true;
     longColVector.isNull[0] = false;
     longColVector.vector[0] = rank;
     groupCount += batch.size;

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFGroupBatches.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFGroupBatches.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFGroupBatches.java
index f23a8b3..607d34a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFGroupBatches.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFGroupBatches.java
@@ -178,6 +178,11 @@ public class VectorPTFGroupBatches {
   }
 
   private void fillGroupResults(VectorizedRowBatch batch) {
+
+    /*
+     * Do careful maintenance of the outputColVector.noNulls flag.
+     */
+
     for (VectorPTFEvaluatorBase evaluator : evaluators) {
       final int outputColumnNum = evaluator.getOutputColumnNum();
       if (evaluator.streamsResult()) {
@@ -190,7 +195,6 @@ public class VectorPTFGroupBatches {
       if (isGroupResultNull) {
         outputColVector.noNulls = false;
       } else {
-        outputColVector.noNulls = true;
         switch (evaluator.getResultColumnVectorType()) {
         case LONG:
           ((LongColumnVector) outputColVector).vector[0] = evaluator.getLongGroupResult();

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFOperator.java
index c9717ba..39fab2c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/ptf/VectorPTFOperator.java
@@ -434,6 +434,7 @@ public class VectorPTFOperator extends Operator<PTFDesc>
         return true;
       }
       if (isNull) {
+        // NULL does equal NULL here.
         continue;
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
index a1a1282..82b7a15 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
@@ -140,7 +140,9 @@ public class VectorUDFAdaptor extends VectorExpression {
       return;
     }
 
-    batch.cols[outputColumnNum].noNulls = true;
+    /*
+     * Do careful maintenance of the outputColVector.noNulls flag.
+     */
 
     /* If all input columns are repeating, just evaluate function
      * for row 0 in the batch and set output repeating.

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 1186bd4..8ce63f9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -1702,7 +1702,7 @@ public class Vectorizer implements PhysicalPlanResolver {
     private boolean validateAndVectorizeMapOperators(MapWork mapWork, TableScanOperator tableScanOperator,
         boolean isTezOrSpark, VectorTaskColumnInfo vectorTaskColumnInfo) throws SemanticException {
 
-      LOG.info("Validating and vectorizing MapWork...");
+      LOG.info("Validating and vectorizing MapWork... (vectorizedVertexNum " + vectorizedVertexNum + ")");
 
       // Set "global" member indicating where to store "not vectorized" information if necessary.
       currentBaseWork = mapWork;
@@ -1906,7 +1906,7 @@ public class Vectorizer implements PhysicalPlanResolver {
         VectorTaskColumnInfo vectorTaskColumnInfo)
             throws SemanticException {
 
-      LOG.info("Validating and vectorizing ReduceWork...");
+      LOG.info("Validating and vectorizing ReduceWork... (vectorizedVertexNum " + vectorizedVertexNum + ")");
 
       Operator<? extends OperatorDesc> newVectorReducer;
       try {
@@ -4102,9 +4102,6 @@ public class Vectorizer implements PhysicalPlanResolver {
     for (int i = 0; i < size; i++) {
       ExprNodeDesc expr = colList.get(i);
       VectorExpression ve = vContext.getVectorExpression(expr);
-      if (ve.getOutputColumnNum() == -1) {
-        fake++;
-      }
       projectedOutputColumns[i] = ve.getOutputColumnNum();
       if (ve instanceof IdentityExpression) {
         // Suppress useless evaluation.

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorArithmeticExpressions.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorArithmeticExpressions.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorArithmeticExpressions.java
index acb3198..f2adc08 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorArithmeticExpressions.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorArithmeticExpressions.java
@@ -341,8 +341,7 @@ public class TestVectorArithmeticExpressions {
     lcv1.noNulls = true;
     lcv0.isRepeating = false;
     lcv1.isRepeating = false;
-    lcv2.noNulls = false;         // set output noNulls to true to make sure it gets over-written
-    lcv2.isRepeating = true;      // similarly with isRepeating
+    lcv2.reset();
     expr.evaluate(vrg);
     assertTrue(lcv2.noNulls);
     assertFalse(lcv2.isRepeating);

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorConditionalExpressions.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorConditionalExpressions.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorConditionalExpressions.java
index c646bf1..ea19e93 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorConditionalExpressions.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorConditionalExpressions.java
@@ -188,7 +188,6 @@ public class TestVectorConditionalExpressions {
     assertEquals(2, r.vector[1]);
     assertEquals(-3, r.vector[2]);
     assertEquals(-4, r.vector[3]);
-    assertEquals(true, r.noNulls);
     assertEquals(false, r.isRepeating);
 
     // verify when first argument (boolean flags) is repeating
@@ -230,7 +229,6 @@ public class TestVectorConditionalExpressions {
     assertEquals(2, r.vector[1]);
     assertEquals(3, r.vector[2]);
     assertEquals(-4, r.vector[3]);
-    assertEquals(true, r.noNulls);
     assertEquals(false, r.isRepeating);
 
     // test when second argument has nulls
@@ -308,7 +306,6 @@ public class TestVectorConditionalExpressions {
     assertEquals(true, 2d == r.vector[1]);
     assertEquals(true, -3d == r.vector[2]);
     assertEquals(true, -4d == r.vector[3]);
-    assertEquals(true, r.noNulls);
     assertEquals(false, r.isRepeating);
   }
 
@@ -480,7 +477,6 @@ public class TestVectorConditionalExpressions {
     assertTrue(getString(r, 1).equals("scalar"));
     assertTrue(getString(r, 2).equals("arg2_2"));
     assertTrue(getString(r, 3).equals("arg2_3"));
-    assertTrue(r.noNulls);
 
     // test for null input strings
     batch = getBatch1Long3BytesVectors();
@@ -504,7 +500,6 @@ public class TestVectorConditionalExpressions {
     assertTrue(getString(r, 1).equals("arg3_1"));
     assertTrue(getString(r, 2).equals("scalar"));
     assertTrue(getString(r, 3).equals("scalar"));
-    assertTrue(r.noNulls);
 
     // test for null input strings
     batch = getBatch1Long3BytesVectors();

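The assertions on r.noNulls are dropped in these tests because an expression may now legitimately leave noNulls == false while marking every produced row not-null in isNull; noNulls only guarantees the absence of NULLs when it is true. A hedged sketch of the per-row check the updated tests rely on instead is below; the helper class and method names are hypothetical.

    import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
    import org.junit.Assert;

    // Hypothetical test helper: verify no row is NULL without assuming noNulls == true.
    public class PerRowNullCheckSketch {
      static void assertNoNullRows(ColumnVector col, int n) {
        if (col.noNulls) {
          return;                              // the flag already guarantees it
        }
        for (int i = 0; i < n; i++) {
          Assert.assertFalse("row " + i + " unexpectedly NULL", col.isNull[i]);
        }
      }
    }
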
http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorLogicalExpressions.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorLogicalExpressions.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorLogicalExpressions.java
index bd5a6b7..a60b9e4 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorLogicalExpressions.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorLogicalExpressions.java
@@ -67,7 +67,6 @@ public class TestVectorLogicalExpressions {
     expr.evaluate(batch);
 
     // spot check
-    Assert.assertTrue(outCol.noNulls);
     Assert.assertEquals(0, outCol.vector[0]);
     Assert.assertEquals(1, outCol.vector[1]);
     Assert.assertEquals(1, outCol.vector[2]);
@@ -125,7 +124,6 @@ public class TestVectorLogicalExpressions {
     expr.evaluate(batch);
 
     // spot check
-    Assert.assertTrue(outCol.noNulls);
     Assert.assertEquals(0, outCol.vector[0]);
     Assert.assertEquals(0, outCol.vector[1]);
     Assert.assertEquals(0, outCol.vector[2]);
@@ -207,7 +205,6 @@ public class TestVectorLogicalExpressions {
     batch.cols[0].noNulls = true;
     expr.evaluate(batch);
     Assert.assertFalse(outCol.isRepeating);
-    Assert.assertTrue(outCol.noNulls);
     Assert.assertEquals(1, outCol.vector[0]);
     Assert.assertEquals(0, outCol.vector[2]);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringExpressions.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringExpressions.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringExpressions.java
index ca3c259..202f18c 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringExpressions.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringExpressions.java
@@ -3757,7 +3757,6 @@ public class TestVectorStringExpressions {
     expr.evaluate(batch);
     Assert.assertEquals(4, batch.size);
     outVector = ((LongColumnVector) batch.cols[3]).vector;
-    Assert.assertFalse(batch.cols[3].noNulls);
     Assert.assertFalse(batch.cols[3].isNull[0]);
     Assert.assertEquals(1, outVector[0]);
     Assert.assertFalse(batch.cols[3].isNull[1]);
@@ -3821,7 +3820,6 @@ public class TestVectorStringExpressions {
     expr.evaluate(batch);
     outVector = ((LongColumnVector) batch.cols[3]).vector;
     Assert.assertEquals(4, batch.size);
-    Assert.assertFalse(batch.cols[3].noNulls);
     Assert.assertFalse(batch.cols[3].isNull[0]);
     Assert.assertEquals(1, outVector[0]);
     Assert.assertFalse(batch.cols[3].isNull[1]);
@@ -4064,7 +4062,6 @@ public class TestVectorStringExpressions {
         outCol.start[0], outCol.length[0]);
     Assert.assertEquals(0, cmp);
     Assert.assertTrue(outCol.isRepeating);
-    Assert.assertFalse(outCol.noNulls);
 
     // no nulls, is repeating
     batch = makeStringBatchMixedCase();
@@ -4124,7 +4121,6 @@ public class TestVectorStringExpressions {
     expr.evaluate(batch);
     outCol = (LongColumnVector) batch.cols[1];
     Assert.assertTrue(outCol.isRepeating);
-    Assert.assertFalse(outCol.noNulls);
     Assert.assertEquals(7, outCol.vector[0]); // length of "mixedUp"
 
     // no nulls, is repeating
@@ -4486,7 +4482,6 @@ public class TestVectorStringExpressions {
         outCol.start[0], outCol.length[0]);
     Assert.assertEquals(0, cmp);
     Assert.assertTrue(outCol.isRepeating);
-    Assert.assertFalse(outCol.noNulls);
 
     // no nulls, is repeating
     batch = makeStringBatch();
@@ -4549,7 +4544,6 @@ public class TestVectorStringExpressions {
         outCol.start[0], outCol.length[0]);
     Assert.assertEquals(0, cmp);
     Assert.assertTrue(outCol.isRepeating);
-    Assert.assertFalse(outCol.noNulls);
 
     // no nulls, is repeating
     batch = makeStringBatch();
@@ -4612,7 +4606,6 @@ public class TestVectorStringExpressions {
         outCol.start[0], outCol.length[0]);
     Assert.assertEquals(0, cmp);
     Assert.assertTrue(outCol.isRepeating);
-    Assert.assertFalse(outCol.noNulls);
 
     // no nulls, is repeating
     batch = makeStringBatch();
@@ -4675,7 +4668,6 @@ public class TestVectorStringExpressions {
         outCol.start[0], outCol.length[0]);
     Assert.assertEquals(0, cmp);
     Assert.assertTrue(outCol.isRepeating);
-    Assert.assertFalse(outCol.noNulls);
 
     // no nulls, is repeating
     batch = makeStringBatch();
@@ -4738,7 +4730,6 @@ public class TestVectorStringExpressions {
         outCol.start[0], outCol.length[0]);
     Assert.assertEquals(0, cmp);
     Assert.assertTrue(outCol.isRepeating);
-    Assert.assertFalse(outCol.noNulls);
 
     // no nulls, is repeating
     batch = makeStringBatch();
@@ -4801,7 +4792,6 @@ public class TestVectorStringExpressions {
         outCol.start[0], outCol.length[0]);
     Assert.assertEquals(0, cmp);
     Assert.assertTrue(outCol.isRepeating);
-    Assert.assertFalse(outCol.noNulls);
 
     // no nulls, is repeating
     batch = makeStringBatch();
@@ -4923,7 +4913,6 @@ public class TestVectorStringExpressions {
     batch.cols[0].noNulls = true;
     expr.evaluate(batch);
     Assert.assertEquals(false, outCol.isRepeating);
-    Assert.assertEquals(true,  outCol.noNulls);
     cmp = StringExpr.compare(red, 0, red.length, outCol.vector[2],
         outCol.start[2], outCol.length[2]);
     Assert.assertEquals(0, cmp);
@@ -5015,7 +5004,6 @@ public class TestVectorStringExpressions {
     expr.evaluate(batch);
     outCol = (BytesColumnVector) batch.cols[1];
     Assert.assertEquals(3, batch.size);
-    Assert.assertTrue(outCol.noNulls);
     Assert.assertFalse(outCol.isRepeating);
     Assert.assertEquals(0,
     StringExpr.compare(
@@ -5043,7 +5031,6 @@ public class TestVectorStringExpressions {
     expr = new StringSubstrColStart(0, 1, 1);
     expr.evaluate(batch);
     Assert.assertEquals(3, batch.size);
-    Assert.assertTrue(outCol.noNulls);
     Assert.assertFalse(outCol.isRepeating);
 
     Assert.assertEquals(0,
@@ -5128,7 +5115,6 @@ public class TestVectorStringExpressions {
     expr.evaluate(batch);
     outCol = (BytesColumnVector) batch.cols[1];
     Assert.assertFalse(outV.isRepeating);
-    Assert.assertTrue(outV.noNulls);
     Assert.assertEquals(0,
     StringExpr.compare(
            // 3rd char starts from index 3 and total length should be 7 bytes as max is 10
@@ -5153,7 +5139,6 @@ public class TestVectorStringExpressions {
     expr = new StringSubstrColStart(0, 2, 1);
     expr.evaluate(batch);
     Assert.assertFalse(outV.isRepeating);
-    Assert.assertTrue(outV.noNulls);
     Assert.assertEquals(0,
     StringExpr.compare(
             // the result is the last 1 character, which occupies 4 bytes
@@ -5190,7 +5175,6 @@ public class TestVectorStringExpressions {
     expr.evaluate(batch);
     BytesColumnVector outCol = (BytesColumnVector) batch.cols[1];
     Assert.assertEquals(3, batch.size);
-    Assert.assertTrue(outCol.noNulls);
     Assert.assertFalse(outCol.isRepeating);
     byte[] expected = "string".getBytes("UTF-8");
     Assert.assertEquals(0,
@@ -5218,7 +5202,6 @@ public class TestVectorStringExpressions {
     expr = new StringSubstrColStartLen(0, -6, 6, 1);
     expr.evaluate(batch);
     outCol = (BytesColumnVector) batch.cols[1];
-    Assert.assertTrue(outCol.noNulls);
     Assert.assertFalse(outCol.isRepeating);
     Assert.assertEquals(3, batch.size);
 
@@ -5250,7 +5233,6 @@ public class TestVectorStringExpressions {
     outCol = (BytesColumnVector) batch.cols[1];
     expr.evaluate(batch);
     Assert.assertEquals(3, batch.size);
-    Assert.assertTrue(outCol.noNulls);
     Assert.assertFalse(outCol.isRepeating);
     Assert.assertEquals(0,
         StringExpr.compare(
@@ -5280,7 +5262,6 @@ public class TestVectorStringExpressions {
     outCol = (BytesColumnVector) batch.cols[1];
     expr.evaluate(batch);
     Assert.assertEquals(3, batch.size);
-    Assert.assertTrue(outCol.noNulls);
     Assert.assertFalse(outCol.isRepeating);
     Assert.assertEquals(0,
     StringExpr.compare(
@@ -5310,7 +5291,6 @@ public class TestVectorStringExpressions {
     expr.evaluate(batch);
     outCol = (BytesColumnVector) batch.cols[1];
     Assert.assertEquals(3, batch.size);
-    Assert.assertTrue(outCol.noNulls);
     Assert.assertFalse(outCol.isRepeating);
     Assert.assertEquals(0,
     StringExpr.compare(
@@ -5391,7 +5371,6 @@ public class TestVectorStringExpressions {
     expr.evaluate(batch);
     Assert.assertEquals(1, batch.size);
     Assert.assertFalse(outV.isRepeating);
-    Assert.assertTrue(outV.noNulls);
     Assert.assertEquals(0,
     StringExpr.compare(
             // 3rd char starts at index 3, and with length 2 it is covering the rest of the array.
@@ -5415,7 +5394,6 @@ public class TestVectorStringExpressions {
     outCol = (BytesColumnVector) batch.cols[1];
     Assert.assertEquals(1, batch.size);
     Assert.assertFalse(outV.isRepeating);
-    Assert.assertTrue(outV.noNulls);
     Assert.assertEquals(0,
     StringExpr.compare(
             // 2nd substring index refers to the 6th index (last char in the array)

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q b/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q
index 449bea2..ab65e9d 100644
--- a/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q
+++ b/ql/src/test/queries/clientpositive/vector_adaptor_usage_mode.q
@@ -1,4 +1,4 @@
-SET hive.vectorized.execution.enabled=true;
+SET hive.vectorized.execution.enabled=false;
 set hive.fetch.task.conversion=none;
 SET hive.auto.convert.join=true;
 
@@ -10,6 +10,9 @@ create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)
 insert overwrite table varchar_udf_1
   select key, value, key, value from src where key = '238' limit 1;
 
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into varchar_udf_1 values (NULL, NULL, NULL, NULL);
+
 DROP TABLE IF EXISTS DECIMAL_UDF_txt;
 DROP TABLE IF EXISTS DECIMAL_UDF;
 
@@ -25,11 +28,17 @@ STORED AS ORC;
 
 INSERT OVERWRITE TABLE DECIMAL_UDF SELECT * FROM DECIMAL_UDF_txt;
 
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into DECIMAL_UDF values (NULL, NULL);
+
 drop table if exists count_case_groupby;
 
 create table count_case_groupby (key string, bool boolean) STORED AS orc;
 insert into table count_case_groupby values ('key1', true),('key2', false),('key3', NULL),('key4', false),('key5',NULL);
 
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into table count_case_groupby values (NULL, NULL);
+
 set hive.vectorized.adaptor.usage.mode=none;
 
 explain vectorization expression
@@ -37,39 +46,39 @@ select
   c2 regexp 'val',
   c4 regexp 'val',
   (c2 regexp 'val') = (c4 regexp 'val')
-from varchar_udf_1 limit 1;
+from varchar_udf_1;
 
 select
   c2 regexp 'val',
   c4 regexp 'val',
   (c2 regexp 'val') = (c4 regexp 'val')
-from varchar_udf_1 limit 1;
+from varchar_udf_1;
 
 explain vectorization expression
 select
   regexp_extract(c2, 'val_([0-9]+)', 1),
   regexp_extract(c4, 'val_([0-9]+)', 1),
   regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from varchar_udf_1 limit 1;
+from varchar_udf_1;
 
 select
   regexp_extract(c2, 'val_([0-9]+)', 1),
   regexp_extract(c4, 'val_([0-9]+)', 1),
   regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from varchar_udf_1 limit 1;
+from varchar_udf_1;
 
 explain vectorization expression
 select
   regexp_replace(c2, 'val', 'replaced'),
   regexp_replace(c4, 'val', 'replaced'),
   regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from varchar_udf_1 limit 1;
+from varchar_udf_1;
 
 select
   regexp_replace(c2, 'val', 'replaced'),
   regexp_replace(c4, 'val', 'replaced'),
   regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from varchar_udf_1 limit 1;
+from varchar_udf_1;
 
 
 set hive.vectorized.adaptor.usage.mode=chosen;
@@ -79,39 +88,39 @@ select
   c2 regexp 'val',
   c4 regexp 'val',
   (c2 regexp 'val') = (c4 regexp 'val')
-from varchar_udf_1 limit 1;
+from varchar_udf_1;
 
 select
   c2 regexp 'val',
   c4 regexp 'val',
   (c2 regexp 'val') = (c4 regexp 'val')
-from varchar_udf_1 limit 1;
+from varchar_udf_1;
 
 explain vectorization expression
 select
   regexp_extract(c2, 'val_([0-9]+)', 1),
   regexp_extract(c4, 'val_([0-9]+)', 1),
   regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from varchar_udf_1 limit 1;
+from varchar_udf_1;
 
 select
   regexp_extract(c2, 'val_([0-9]+)', 1),
   regexp_extract(c4, 'val_([0-9]+)', 1),
   regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from varchar_udf_1 limit 1;
+from varchar_udf_1;
 
 explain vectorization expression
 select
   regexp_replace(c2, 'val', 'replaced'),
   regexp_replace(c4, 'val', 'replaced'),
   regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from varchar_udf_1 limit 1;
+from varchar_udf_1;
 
 select
   regexp_replace(c2, 'val', 'replaced'),
   regexp_replace(c4, 'val', 'replaced'),
   regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from varchar_udf_1 limit 1;
+from varchar_udf_1;
 
 
 set hive.vectorized.adaptor.usage.mode=none;

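The recurring "-- Add a single NULL row that will come from ORC as isRepeated." comments describe the test technique used throughout these .q changes: inserting one all-NULL row in a separate statement puts it in its own ORC file, and the vectorized ORC reader hands that column to expressions as a repeating value that is NULL, which is exactly the input shape this fix repairs. A hedged illustration of what such a column looks like to a VectorExpression follows; the class name is hypothetical, and the field names follow Hive's public ColumnVector API.

    import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
    import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;

    // Hypothetical illustration of the batch shape the extra NULL rows are meant to produce.
    public class RepeatingNullExample {
      public static void main(String[] args) {
        LongColumnVector col = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
        col.isRepeating = true;    // one logical value stands for every row in the batch
        col.noNulls = false;
        col.isNull[0] = true;      // ...and that value is NULL
        // A correct consumer must check (noNulls || !isNull[0]) before reading vector[0];
        // reading vector[0] here would use an undefined value.
        boolean usable = col.noNulls || !col.isNull[0];
        System.out.println("repeating value usable? " + usable);   // prints false
      }
    }
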
http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/queries/clientpositive/vector_between_columns.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_between_columns.q b/ql/src/test/queries/clientpositive/vector_between_columns.q
index 5a2714e..a8e9ca4 100644
--- a/ql/src/test/queries/clientpositive/vector_between_columns.q
+++ b/ql/src/test/queries/clientpositive/vector_between_columns.q
@@ -22,8 +22,13 @@ load data local inpath '../../data/files/TINT' into table TINT_txt;
 
 create table TSINT stored as orc AS SELECT * FROM TSINT_txt;
 
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into TSINT values (NULL, NULL);
+
 create table TINT stored as orc AS SELECT * FROM TINT_txt;
 
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into TINT values (NULL, NULL);
 
 explain vectorization expression
 select tint.rnum, tsint.rnum, tint.cint, tsint.csint, (case when (tint.cint between tsint.csint and tsint.csint) then "Ok" else "NoOk" end) as between_col from tint , tsint;

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/queries/clientpositive/vector_between_in.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_between_in.q b/ql/src/test/queries/clientpositive/vector_between_in.q
index c336829..b87d646 100644
--- a/ql/src/test/queries/clientpositive/vector_between_in.q
+++ b/ql/src/test/queries/clientpositive/vector_between_in.q
@@ -5,6 +5,9 @@ set hive.fetch.task.conversion=none;
 
 CREATE TABLE decimal_date_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, CAST(CAST((CAST(cint AS BIGINT) *ctinyint) AS TIMESTAMP) AS DATE) AS cdate FROM alltypesorc ORDER BY cdate;
 
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into decimal_date_test values (NULL, NULL, NULL, NULL);
+
 EXPLAIN VECTORIZATION EXPRESSION SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate;
 
 EXPLAIN VECTORIZATION EXPRESSION SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE));

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/queries/clientpositive/vector_bround.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_bround.q b/ql/src/test/queries/clientpositive/vector_bround.q
index ec192bf..0bada68 100644
--- a/ql/src/test/queries/clientpositive/vector_bround.q
+++ b/ql/src/test/queries/clientpositive/vector_bround.q
@@ -14,6 +14,9 @@ values
 (2.51, 1.251),
 (3.51, 1.351);
 
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into test_vector_bround values (NULL, NULL);
+
 set hive.vectorized.execution.enabled=true;
 
 explain vectorization detail

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/queries/clientpositive/vector_char_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_char_2.q b/ql/src/test/queries/clientpositive/vector_char_2.q
index 5520ddd..d3a2e93 100644
--- a/ql/src/test/queries/clientpositive/vector_char_2.q
+++ b/ql/src/test/queries/clientpositive/vector_char_2.q
@@ -12,6 +12,9 @@ create table char_2 (
 
 insert overwrite table char_2 select * from src;
 
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into char_2 values (NULL, NULL);
+
 select value, sum(cast(key as int)), count(*) numrows
 from src
 group by value

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/queries/clientpositive/vector_coalesce_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_coalesce_2.q b/ql/src/test/queries/clientpositive/vector_coalesce_2.q
index ea45ddd..a264edd 100644
--- a/ql/src/test/queries/clientpositive/vector_coalesce_2.q
+++ b/ql/src/test/queries/clientpositive/vector_coalesce_2.q
@@ -1,12 +1,15 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
-SET hive.vectorized.execution.enabled=false;
+SET hive.vectorized.execution.enabled=true;
 set hive.fetch.task.conversion=none;
 
 create table str_str_orc (str1 string, str2 string) stored as orc;
 
 insert into table str_str_orc values (null, "X"), ("0", "X"), ("1", "X"), (null, "y");
 
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into str_str_orc values (NULL, NULL);
+
 EXPLAIN VECTORIZATION EXPRESSION
 SELECT
    str2, ROUND(sum(cast(COALESCE(str1, 0) as int))/60, 2) as result

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/queries/clientpositive/vector_coalesce_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_coalesce_3.q b/ql/src/test/queries/clientpositive/vector_coalesce_3.q
index e3d9f0a..7d5e82c 100644
--- a/ql/src/test/queries/clientpositive/vector_coalesce_3.q
+++ b/ql/src/test/queries/clientpositive/vector_coalesce_3.q
@@ -11,7 +11,12 @@ CREATE TABLE test_2 (member BIGINT) STORED AS ORC;
 INSERT INTO test_1 VALUES (3,1),(2,2); 
 INSERT INTO test_2 VALUES (1),(2),(3),(4); 
 
-EXPLAIN
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into test_1 values (NULL, NULL);
+insert into test_2 values (NULL);
+
+
+EXPLAIN VECTORIZATION DETAIL
 SELECT m.member, (CASE WHEN COALESCE(n.attr, 5)>1 THEN n.attr END) AS attr 
 FROM test_2 m LEFT JOIN test_1 n ON m.member = n.member; 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/queries/clientpositive/vector_data_types.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_data_types.q b/ql/src/test/queries/clientpositive/vector_data_types.q
index 240fc89..b796e71 100644
--- a/ql/src/test/queries/clientpositive/vector_data_types.q
+++ b/ql/src/test/queries/clientpositive/vector_data_types.q
@@ -37,6 +37,9 @@ STORED AS ORC;
 
 INSERT INTO TABLE over1korc SELECT * FROM over1k;
 
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into over1korc values (NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL, NULL,NULL);
+
 SET hive.vectorized.execution.enabled=false;
 
 EXPLAIN VECTORIZATION EXPRESSION SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20;
@@ -52,5 +55,9 @@ EXPLAIN VECTORIZATION EXPRESSION select t, si, i, b, f, d, bo, s, ts, `dec`, bin
 
 SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i LIMIT 20;
 
+EXPLAIN VECTORIZATION EXPRESSION 
+SELECT SUM(HASH(*))
+FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i) as q;
+
 SELECT SUM(HASH(*))
 FROM (SELECT t, si, i, b, f, d, bo, s, ts, `dec`, bin FROM over1korc ORDER BY t, si, i) as q;

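The SUM(HASH(*)) wrapper queries added here and in the decimal tests below compute a single, order-insensitive checksum over the full result set, so vectorized and row-mode runs can be compared without listing every row in the golden output. A hedged sketch of why summing per-row hashes is order-insensitive is shown below; the names are illustrative and java.util.Objects.hash merely stands in for Hive's HASH(*).

    import java.util.Arrays;
    import java.util.List;
    import java.util.Objects;

    // Illustrative only: hashing each row and summing is commutative, so two runs that
    // return the same multiset of rows (in any order) produce the same total.
    public class ResultChecksumSketch {
      static long checksum(List<Object[]> rows) {
        long sum = 0;
        for (Object[] row : rows) {
          sum += Objects.hash((Object[]) row);   // stand-in for Hive's HASH(*)
        }
        return sum;
      }
      public static void main(String[] args) {
        List<Object[]> a = Arrays.asList(new Object[]{1, "x"}, new Object[]{2, "y"});
        List<Object[]> b = Arrays.asList(new Object[]{2, "y"}, new Object[]{1, "x"});
        System.out.println(checksum(a) == checksum(b));   // true: row order does not matter
      }
    }
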
http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/queries/clientpositive/vector_date_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_date_1.q b/ql/src/test/queries/clientpositive/vector_date_1.q
index 0055973..bb515b1 100644
--- a/ql/src/test/queries/clientpositive/vector_date_1.q
+++ b/ql/src/test/queries/clientpositive/vector_date_1.q
@@ -2,6 +2,7 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.vectorized.execution.enabled=true;
 set hive.fetch.task.conversion=none;
+set hive.cli.print.header=true;
 
 drop table if exists vector_date_1;
 create table vector_date_1 (dt1 date, dt2 date) stored as orc;
@@ -13,8 +14,10 @@ insert into table vector_date_1
 insert into table vector_date_1
   select date '2001-01-01', date '2001-06-01' from src limit 1;
 
+select * from vector_date_1 order by dt1, dt2;
+
 -- column-to-column comparison in select clause
-explain
+explain vectorization detail
 select
   dt1, dt2,
   -- should be all true
@@ -41,7 +44,7 @@ select
   dt2 > dt1
 from vector_date_1 order by dt1;
 
-explain
+explain vectorization detail
 select
   dt1, dt2,
   -- should be all false
@@ -69,7 +72,7 @@ select
 from vector_date_1 order by dt1;
 
 -- column-to-literal/literal-to-column comparison in select clause
-explain
+explain vectorization detail
 select
   dt1,
   -- should be all true
@@ -96,7 +99,7 @@ select
   date '1970-01-01' < dt1
 from vector_date_1 order by dt1;
 
-explain
+explain vectorization detail
 select
   dt1,
   -- should all be false
@@ -126,7 +129,7 @@ from vector_date_1 order by dt1;
 
 -- column-to-column comparisons in predicate
 -- all rows with non-null dt1 should be returned
-explain
+explain vectorization detail
 select
   dt1, dt2
 from vector_date_1
@@ -153,7 +156,7 @@ order by dt1;
 
 -- column-to-literal/literal-to-column comparison in predicate
 -- only a single row should be returned
-explain
+explain vectorization detail
 select
   dt1, dt2
 from vector_date_1
@@ -182,7 +185,7 @@ where
   and date '1970-01-01' <= dt1
 order by dt1;
 
-EXPLAIN VECTORIZATION EXPRESSION
+EXPLAIN VECTORIZATION DETAIL
 SELECT dt1 FROM vector_date_1 WHERE dt1 IN (date '1970-01-01', date '2001-01-01');
 
 SELECT dt1 FROM vector_date_1 WHERE dt1 IN (date '1970-01-01', date '2001-01-01');

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/queries/clientpositive/vector_decimal_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_1.q b/ql/src/test/queries/clientpositive/vector_decimal_1.q
index 321275f..d47de3a 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_1.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_1.q
@@ -11,6 +11,9 @@ desc decimal_1;
 
 insert overwrite table decimal_1
   select cast('17.29' as decimal(4,2)), 3.1415926BD, 3115926.54321BD from src tablesample (1 rows);
+  
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into decimal_1 values (NULL, NULL, NULL);
 
 explain vectorization detail
 select cast(t as boolean) from decimal_1 order by t;

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q b/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q
index 6fbf4ba..c3a8318 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q
@@ -1,11 +1,15 @@
 set hive.explain.user=false;
 set hive.fetch.task.conversion=none;
+set hive.stats.column.autogather=true;
 
 CREATE TABLE decimal_vgby STORED AS ORC AS 
     SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, 
     CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2,
     cint
     FROM alltypesorc;
+  
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into decimal_vgby values (NULL, NULL, NULL, NULL);
 
 SET hive.vectorized.execution.enabled=true;
 
@@ -49,6 +53,9 @@ CREATE TABLE decimal_vgby_small STORED AS TEXTFILE AS
     cint
     FROM alltypesorc;
 
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into decimal_vgby_small values (NULL, NULL, NULL, NULL);
+
 EXPLAIN VECTORIZATION DETAIL
 SELECT cint,
     COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1),
@@ -63,6 +70,13 @@ SELECT cint,
     GROUP BY cint
     HAVING COUNT(*) > 1;
 
+SELECT SUM(HASH(*))
+FROM (SELECT cint,
+    COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1),
+    COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2)
+    FROM decimal_vgby_small
+    GROUP BY cint) q;
+
 -- Now add the others...
 EXPLAIN VECTORIZATION DETAIL
 SELECT cint,
@@ -77,3 +91,10 @@ SELECT cint,
     FROM decimal_vgby_small
     GROUP BY cint
     HAVING COUNT(*) > 1;
+
+SELECT SUM(HASH(*))
+FROM (SELECT cint,
+    COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
+    COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2), AVG(cdecimal2), STDDEV_POP(cdecimal2), STDDEV_SAMP(cdecimal2)
+    FROM decimal_vgby_small
+    GROUP BY cint) q;

http://git-wip-us.apache.org/repos/asf/hive/blob/a4689020/ql/src/test/queries/clientpositive/vector_decimal_expressions.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_expressions.q b/ql/src/test/queries/clientpositive/vector_decimal_expressions.q
index 0b41eec..6790f27 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_expressions.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_expressions.q
@@ -1,10 +1,16 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.fetch.task.conversion=none;
+set hive.stats.column.autogather=false;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE decimal_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc;
+CREATE TABLE decimal_test (cdouble double,cdecimal1 DECIMAL(20,10), cdecimal2 DECIMAL(23,14)) STORED AS ORC;
+
+-- Add a single NULL row that will come from ORC as isRepeated.
+insert into decimal_test values (NULL, NULL, NULL);
+
+INSERT INTO TABLE decimal_test SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc;
 
 SET hive.vectorized.execution.enabled=true;
 
@@ -17,6 +23,10 @@ SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1
 ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
 LIMIT 10;
 
+SELECT SUM(HASH(*))
+FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14) q;
+
 -- DECIMAL_64
 
 CREATE TABLE decimal_test_small STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(10,3)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(7,2)) AS cdecimal2 FROM alltypesorc;
@@ -29,3 +39,7 @@ LIMIT 10;
 SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
 ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
 LIMIT 10;
+
+SELECT SUM(HASH(*))
+FROM (SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test_small WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
+ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14) q;