Posted to commits@hive.apache.org by jc...@apache.org on 2018/03/22 16:57:25 UTC

[02/34] hive git commit: HIVE-18979: Enable AggregateReduceFunctionsRule from Calcite (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
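
Note: Calcite's AggregateReduceFunctionsRule rewrites complex aggregate calls (avg, stddev_pop, stddev_samp, var_pop, var_samp) into combinations of sum() and count(), and the final value is reassembled with power() and division in a later Select operator, as the updated plans below show. As a side effect, the struct<count:bigint,sum:double,...> intermediates in the reduce-sink value expressions become plain doubles and bigints, and the vectorized group-by uses VectorUDAFSumDouble/VectorUDAFCount plus extra scratch columns instead of the VectorUDAFVar*/VectorUDAFAvg* aggregators. A minimal sketch of the rewrite, reusing the alltypesorc table and cfloat column that appear in the plans below (illustrative only, not the literal test query):

    -- stddev_pop(cfloat) as written in the test query:
    SELECT stddev_pop(cfloat) FROM alltypesorc;

    -- equivalent shape produced by the rule: the aggregation stage computes only
    -- sums and counts, and the final projection applies the sum-of-squares formula
    SELECT power((sum(cfloat * cfloat) - (sum(cfloat) * sum(cfloat)) / count(cfloat))
                 / count(cfloat), 0.5)
    FROM alltypesorc;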

http://git-wip-us.apache.org/repos/asf/hive/blob/5cb8867b/ql/src/test/results/clientpositive/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_13.q.out b/ql/src/test/results/clientpositive/vectorization_13.q.out
index e41a0d7..96eda74 100644
--- a/ql/src/test/results/clientpositive/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_13.q.out
@@ -91,26 +91,27 @@ STAGE PLANS:
               predicate: (((UDFToDouble(ctimestamp1) > 11.0D) and (UDFToDouble(ctimestamp2) <> 12.0D) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (cdouble <= 10.175D) and (cboolean1 <> 1))) (type: boolean)
               Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
-                outputColumnNames: ctinyint, cfloat, cstring1, ctimestamp1, cboolean1
+                expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), UDFToDouble(cfloat) (type: double), (UDFToDouble(cfloat) * UDFToDouble(cfloat)) (type: double), UDFToDouble(ctinyint) (type: double), (UDFToDouble(ctinyint) * UDFToDouble(ctinyint)) (type: double)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                 Select Vectorization:
                     className: VectorSelectOperator
                     native: true
-                    projectedOutputColumnNums: [0, 4, 6, 8, 10]
+                    projectedOutputColumnNums: [10, 0, 8, 4, 6, 4, 13, 15, 18]
+                    selectExpressions: DoubleColMultiplyDoubleColumn(col 4:double, col 4:double)(children: col 4:float, col 4:float) -> 13:double, CastLongToDouble(col 0:tinyint) -> 15:double, DoubleColMultiplyDoubleColumn(col 16:double, col 17:double)(children: CastLongToDouble(col 0:tinyint) -> 16:double, CastLongToDouble(col 0:tinyint) -> 17:double) -> 18:double
                 Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: max(ctinyint), sum(cfloat), stddev_pop(cfloat), stddev_pop(ctinyint), max(cfloat), min(ctinyint)
+                  aggregations: max(_col1), sum(_col3), sum(_col6), sum(_col5), count(_col3), sum(_col8), sum(_col7), count(_col1), max(_col3), min(_col1)
                   Group By Vectorization:
-                      aggregators: VectorUDAFMaxLong(col 0:tinyint) -> tinyint, VectorUDAFSumDouble(col 4:float) -> double, VectorUDAFVarDouble(col 4:float) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_pop, VectorUDAFVarLong(col 0:tinyint) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_pop, VectorUDAFMaxDouble(col 4:float) -> float, VectorUDAFMinLong(col 0:tinyint) -> tinyint
+                      aggregators: VectorUDAFMaxLong(col 0:tinyint) -> tinyint, VectorUDAFSumDouble(col 4:float) -> double, VectorUDAFSumDouble(col 13:double) -> double, VectorUDAFSumDouble(col 4:double) -> double, VectorUDAFCount(col 4:float) -> bigint, VectorUDAFSumDouble(col 18:double) -> double, VectorUDAFSumDouble(col 15:double) -> double, VectorUDAFCount(col 0:tinyint) -> bigint, VectorUDAFMaxDouble(col 4:float) -> float, VectorUDAFMinLong(col 0:tinyint) -> tinyint
                       className: VectorGroupByOperator
                       groupByMode: HASH
                       keyExpressions: col 10:boolean, col 0:tinyint, col 8:timestamp, col 4:float, col 6:string
                       native: false
                       vectorProcessingMode: HASH
-                      projectedOutputColumnNums: [0, 1, 2, 3, 4, 5]
-                  keys: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
+                      projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+                  keys: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
                   mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
                   Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
@@ -122,7 +123,7 @@ STAGE PLANS:
                         nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                     Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col5 (type: tinyint), _col6 (type: double), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: float), _col10 (type: tinyint)
+                    value expressions: _col5 (type: tinyint), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: bigint), _col10 (type: double), _col11 (type: double), _col12 (type: bigint), _col13 (type: float), _col14 (type: tinyint)
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -138,20 +139,20 @@ STAGE PLANS:
               includeColumns: [0, 4, 5, 6, 8, 9, 10]
               dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
               partitionColumnCount: 0
-              scratchColumnTypeNames: [double, decimal(11,4)]
+              scratchColumnTypeNames: [double, decimal(11,4), double, double, double, double]
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
           enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Reduce Operator Tree:
         Group By Operator
-          aggregations: max(VALUE._col0), sum(VALUE._col1), stddev_pop(VALUE._col2), stddev_pop(VALUE._col3), max(VALUE._col4), min(VALUE._col5)
+          aggregations: max(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2), sum(VALUE._col3), count(VALUE._col4), sum(VALUE._col5), sum(VALUE._col6), count(VALUE._col7), max(VALUE._col8), min(VALUE._col9)
           keys: KEY._col0 (type: boolean), KEY._col1 (type: tinyint), KEY._col2 (type: timestamp), KEY._col3 (type: float), KEY._col4 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
           Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28D / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint)
+            expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), power(((_col7 - ((_col8 * _col8) / _col9)) / _col9), 0.5) (type: double), (- _col6) (type: double), power(((_col10 - ((_col11 * _col11) / _col12)) / _col12), 0.5) (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28D / (- (- _col6))) (type: double), _col13 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col14 (type: tinyint)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
             Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -420,26 +421,27 @@ STAGE PLANS:
               predicate: (((UDFToDouble(ctimestamp1) > -1.388D) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999D) and (CAST( ctinyint AS decimal(11,4)) < 9763215.5639)) or ((cfloat < 3569) and (cdouble <= 10.175D) and (cboolean1 <> 1))) (type: boolean)
               Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
-                outputColumnNames: ctinyint, cfloat, cstring1, ctimestamp1, cboolean1
+                expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), UDFToDouble(cfloat) (type: double), (UDFToDouble(cfloat) * UDFToDouble(cfloat)) (type: double), UDFToDouble(ctinyint) (type: double), (UDFToDouble(ctinyint) * UDFToDouble(ctinyint)) (type: double)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                 Select Vectorization:
                     className: VectorSelectOperator
                     native: true
-                    projectedOutputColumnNums: [0, 4, 6, 8, 10]
+                    projectedOutputColumnNums: [10, 0, 8, 4, 6, 4, 13, 15, 18]
+                    selectExpressions: DoubleColMultiplyDoubleColumn(col 4:double, col 4:double)(children: col 4:float, col 4:float) -> 13:double, CastLongToDouble(col 0:tinyint) -> 15:double, DoubleColMultiplyDoubleColumn(col 16:double, col 17:double)(children: CastLongToDouble(col 0:tinyint) -> 16:double, CastLongToDouble(col 0:tinyint) -> 17:double) -> 18:double
                 Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: max(ctinyint), sum(cfloat), stddev_pop(cfloat), stddev_pop(ctinyint), max(cfloat), min(ctinyint)
+                  aggregations: max(_col1), sum(_col3), sum(_col6), sum(_col5), count(_col3), sum(_col8), sum(_col7), count(_col1), max(_col3), min(_col1)
                   Group By Vectorization:
-                      aggregators: VectorUDAFMaxLong(col 0:tinyint) -> tinyint, VectorUDAFSumDouble(col 4:float) -> double, VectorUDAFVarDouble(col 4:float) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_pop, VectorUDAFVarLong(col 0:tinyint) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_pop, VectorUDAFMaxDouble(col 4:float) -> float, VectorUDAFMinLong(col 0:tinyint) -> tinyint
+                      aggregators: VectorUDAFMaxLong(col 0:tinyint) -> tinyint, VectorUDAFSumDouble(col 4:float) -> double, VectorUDAFSumDouble(col 13:double) -> double, VectorUDAFSumDouble(col 4:double) -> double, VectorUDAFCount(col 4:float) -> bigint, VectorUDAFSumDouble(col 18:double) -> double, VectorUDAFSumDouble(col 15:double) -> double, VectorUDAFCount(col 0:tinyint) -> bigint, VectorUDAFMaxDouble(col 4:float) -> float, VectorUDAFMinLong(col 0:tinyint) -> tinyint
                       className: VectorGroupByOperator
                       groupByMode: HASH
                       keyExpressions: col 10:boolean, col 0:tinyint, col 8:timestamp, col 4:float, col 6:string
                       native: false
                       vectorProcessingMode: HASH
-                      projectedOutputColumnNums: [0, 1, 2, 3, 4, 5]
-                  keys: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
+                      projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+                  keys: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
                   mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
                   Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
@@ -451,7 +453,7 @@ STAGE PLANS:
                         nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                     Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col5 (type: tinyint), _col6 (type: double), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: float), _col10 (type: tinyint)
+                    value expressions: _col5 (type: tinyint), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: bigint), _col10 (type: double), _col11 (type: double), _col12 (type: bigint), _col13 (type: float), _col14 (type: tinyint)
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -468,13 +470,13 @@ STAGE PLANS:
           enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Reduce Operator Tree:
         Group By Operator
-          aggregations: max(VALUE._col0), sum(VALUE._col1), stddev_pop(VALUE._col2), stddev_pop(VALUE._col3), max(VALUE._col4), min(VALUE._col5)
+          aggregations: max(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2), sum(VALUE._col3), count(VALUE._col4), sum(VALUE._col5), sum(VALUE._col6), count(VALUE._col7), max(VALUE._col8), min(VALUE._col9)
           keys: KEY._col0 (type: boolean), KEY._col1 (type: tinyint), KEY._col2 (type: timestamp), KEY._col3 (type: float), KEY._col4 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
           Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28D / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint)
+            expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * _col3) (type: float), power(((_col7 - ((_col8 * _col8) / _col9)) / _col9), 0.5) (type: double), (- _col6) (type: double), power(((_col10 - ((_col11 * _col11) / _col12)) / _col12), 0.5) (type: double), (CAST( ((- _col1) + _col5) AS decimal(3,0)) - 10.175) (type: decimal(7,3)), (- (- _col6)) (type: double), (-26.28D / (- (- _col6))) (type: double), _col13 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col14 (type: tinyint)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
             Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/5cb8867b/ql/src/test/results/clientpositive/vectorization_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_14.q.out b/ql/src/test/results/clientpositive/vectorization_14.q.out
index 02a986c..7a7a817 100644
--- a/ql/src/test/results/clientpositive/vectorization_14.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_14.q.out
@@ -91,27 +91,27 @@ STAGE PLANS:
               predicate: (((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and ((cbigint > -257L) or (cfloat < UDFToFloat(cint))) and (UDFToLong(ctinyint) <= cbigint) and (cdouble < UDFToDouble(ctinyint))) (type: boolean)
               Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28D + cdouble)) (type: double)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28D + cdouble)) (type: double), ((- (-26.28D + cdouble)) * (- (-26.28D + cdouble))) (type: double), UDFToDouble(cfloat) (type: double), (UDFToDouble(cfloat) * UDFToDouble(cfloat)) (type: double)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                 Select Vectorization:
                     className: VectorSelectOperator
                     native: true
-                    projectedOutputColumnNums: [8, 4, 6, 10, 5, 14]
-                    selectExpressions: DoubleColUnaryMinus(col 13:double)(children: DoubleScalarAddDoubleColumn(val -26.28, col 5:double) -> 13:double) -> 14:double
+                    projectedOutputColumnNums: [8, 4, 6, 10, 5, 14, 13, 4, 15]
+                    selectExpressions: DoubleColUnaryMinus(col 13:double)(children: DoubleScalarAddDoubleColumn(val -26.28, col 5:double) -> 13:double) -> 14:double, DoubleColMultiplyDoubleColumn(col 15:double, col 16:double)(children: DoubleColUnaryMinus(col 13:double)(children: DoubleScalarAddDoubleColumn(val -26.28, col 5:double) -> 13:double) -> 15:double, DoubleColUnaryMinus(col 13:double)(children: DoubleScalarAddDoubleColumn(val -26.28, col 5:double) -> 13:double) -> 16:double) -> 13:double, DoubleColMultiplyDoubleColumn(col 4:double, col 4:double)(children: col 4:float, col 4:float) -> 15:double
                 Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: stddev_samp(_col5), max(_col1), stddev_pop(_col1), count(_col1), var_pop(_col1), var_samp(_col1)
+                  aggregations: sum(_col6), sum(_col5), count(_col5), max(_col1), sum(_col8), sum(_col7), count(_col1)
                   Group By Vectorization:
-                      aggregators: VectorUDAFVarDouble(col 14:double) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_samp, VectorUDAFMaxDouble(col 4:float) -> float, VectorUDAFVarDouble(col 4:float) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_pop, VectorUDAFCount(col 4:float) -> bigint, VectorUDAFVarDouble(col 4:float) -> struct<count:bigint,sum:double,variance:double> aggregation: var_pop, VectorUDAFVarDouble(col 4:float) -> struct<count:bigint,sum:double,variance:double> aggregation: var_samp
+                      aggregators: VectorUDAFSumDouble(col 13:double) -> double, VectorUDAFSumDouble(col 14:double) -> double, VectorUDAFCount(col 14:double) -> bigint, VectorUDAFMaxDouble(col 4:float) -> float, VectorUDAFSumDouble(col 15:double) -> double, VectorUDAFSumDouble(col 4:double) -> double, VectorUDAFCount(col 4:float) -> bigint
                       className: VectorGroupByOperator
                       groupByMode: HASH
                       keyExpressions: col 6:string, col 4:float, col 5:double, col 8:timestamp, col 10:boolean
                       native: false
                       vectorProcessingMode: HASH
-                      projectedOutputColumnNums: [0, 1, 2, 3, 4, 5]
+                      projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6]
                   keys: _col2 (type: string), _col1 (type: float), _col4 (type: double), _col0 (type: timestamp), _col3 (type: boolean)
                   mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
                   Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string), _col1 (type: float), _col2 (type: double), _col3 (type: timestamp), _col4 (type: boolean)
@@ -123,7 +123,7 @@ STAGE PLANS:
                         nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                     Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: float), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: bigint), _col9 (type: struct<count:bigint,sum:double,variance:double>), _col10 (type: struct<count:bigint,sum:double,variance:double>)
+                    value expressions: _col5 (type: double), _col6 (type: double), _col7 (type: bigint), _col8 (type: float), _col9 (type: double), _col10 (type: double), _col11 (type: bigint)
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -139,20 +139,20 @@ STAGE PLANS:
               includeColumns: [0, 2, 3, 4, 5, 6, 8, 9, 10]
               dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
               partitionColumnCount: 0
-              scratchColumnTypeNames: [double, double]
+              scratchColumnTypeNames: [double, double, double, double]
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
           enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Reduce Operator Tree:
         Group By Operator
-          aggregations: stddev_samp(VALUE._col0), max(VALUE._col1), stddev_pop(VALUE._col2), count(VALUE._col3), var_pop(VALUE._col4), var_samp(VALUE._col5)
+          aggregations: sum(VALUE._col0), sum(VALUE._col1), count(VALUE._col2), max(VALUE._col3), sum(VALUE._col4), sum(VALUE._col5), count(VALUE._col6)
           keys: KEY._col0 (type: string), KEY._col1 (type: float), KEY._col2 (type: double), KEY._col3 (type: timestamp), KEY._col4 (type: boolean)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
           Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col3 (type: timestamp), _col1 (type: float), _col0 (type: string), _col4 (type: boolean), _col2 (type: double), (-26.28D + _col2) (type: double), (- (-26.28D + _col2)) (type: double), _col5 (type: double), (_col1 * -26.28) (type: float), _col6 (type: float), (- _col1) (type: float), (- _col6) (type: float), ((- (-26.28D + _col2)) / 10.175D) (type: double), _col7 (type: double), _col8 (type: bigint), (- ((- (-26.28D + _col2)) / 10.175D)) (type: double), (-1.389D % _col5) (type: double), (UDFToDouble(_col1) - _col2) (type: double), _col9 (type: double), (_col9 % 10.175D) (type: double), _col10 (type: double), (- (UDFToDouble(_col1) - _col2)) (type: double)
+            expressions: _col3 (type: timestamp), _col1 (type: float), _col0 (type: string), _col4 (type: boolean), _col2 (type: double), (-26.28D + _col2) (type: double), (- (-26.28D + _col2)) (type: double), power(((_col5 - ((_col6 * _col6) / _col7)) / CASE WHEN ((_col7 = 1L)) THEN (null) ELSE ((_col7 - 1)) END), 0.5) (type: double), (_col1 * -26.28) (type: float), _col8 (type: float), (- _col1) (type: float), (- _col8) (type: float), ((- (-26.28D + _col2)) / 10.175D) (type: double), power(((_col9 - ((_col10 * _col10) / _col11)) / _col11), 0.5) (type: double), _col11 (type: bigint), (- ((- (-26.28D + _col2)) / 10.175D)) (type: double), (-1.389D % power(((_col5 - ((_col6 * _col6) / _col7)) / CASE WHEN ((_col7 = 1L)) THEN (null) ELSE ((_col7 - 1)) END), 0.5)) (type: double), (UDFToDouble(_col1) - _col2) (type: double), ((_col9 - ((_col10 * _col10) / _col11)) / _col11) (type: double), (((_col9 - ((_col10 * _col10) / _col11)) / _col11) % 10.175D) (type: double), ((_col9 - ((_col10 * _col10) / _col11)) / CASE WHEN ((_col11 = 1L)) THEN (null) ELSE ((_col11 - 1)) END) (type: double), (- (UDFToDouble(_col1) - _col2)) (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21
             Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
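
Note: for the sample variants (stddev_samp, var_samp) the rule divides by count - 1 and guards the single-row case, which is where the CASE WHEN ((_colN = 1L)) THEN (null) ELSE ((_colN - 1)) END expressions in the plan above come from. A sketch of the same computation, where x is just a readability alias for the derived column (- (-26.28D + cdouble)) used by this plan:

    SELECT power((sum(x * x) - (sum(x) * sum(x)) / count(x))
                 / CASE WHEN count(x) = 1 THEN null ELSE count(x) - 1 END, 0.5)
    FROM (SELECT (- (-26.28D + cdouble)) AS x FROM alltypesorc) t;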

http://git-wip-us.apache.org/repos/asf/hive/blob/5cb8867b/ql/src/test/results/clientpositive/vectorization_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_15.q.out b/ql/src/test/results/clientpositive/vectorization_15.q.out
index a8d681b..dbef3e7 100644
--- a/ql/src/test/results/clientpositive/vectorization_15.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_15.q.out
@@ -87,26 +87,27 @@ STAGE PLANS:
               predicate: (((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0D)) or (cstring1 like '10%') or (cstring2 like '%ss%')) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: ctinyint (type: tinyint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
-                outputColumnNames: ctinyint, cint, cfloat, cdouble, cstring1, ctimestamp1, cboolean1
+                expressions: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp), UDFToDouble(cfloat) (type: double), (UDFToDouble(cfloat) * UDFToDouble(cfloat)) (type: double), UDFToDouble(ctinyint) (type: double), (UDFToDouble(ctinyint) * UDFToDouble(ctinyint)) (type: double), UDFToDouble(cint) (type: double), (UDFToDouble(cint) * UDFToDouble(cint)) (type: double)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
                 Select Vectorization:
                     className: VectorSelectOperator
                     native: true
-                    projectedOutputColumnNums: [0, 2, 4, 5, 6, 8, 10]
+                    projectedOutputColumnNums: [4, 10, 5, 6, 0, 2, 8, 4, 13, 14, 17, 15, 19]
+                    selectExpressions: DoubleColMultiplyDoubleColumn(col 4:double, col 4:double)(children: col 4:float, col 4:float) -> 13:double, CastLongToDouble(col 0:tinyint) -> 14:double, DoubleColMultiplyDoubleColumn(col 15:double, col 16:double)(children: CastLongToDouble(col 0:tinyint) -> 15:double, CastLongToDouble(col 0:tinyint) -> 16:double) -> 17:double, CastLongToDouble(col 2:int) -> 15:double, DoubleColMultiplyDoubleColumn(col 16:double, col 18:double)(children: CastLongToDouble(col 2:int) -> 16:double, CastLongToDouble(col 2:int) -> 18:double) -> 19:double
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: stddev_samp(cfloat), min(cdouble), stddev_samp(ctinyint), var_pop(ctinyint), var_samp(cint), stddev_pop(cint)
+                  aggregations: sum(_col8), sum(_col7), count(_col0), min(_col2), sum(_col10), sum(_col9), count(_col4), sum(_col12), sum(_col11), count(_col5)
                   Group By Vectorization:
-                      aggregators: VectorUDAFVarDouble(col 4:float) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_samp, VectorUDAFMinDouble(col 5:double) -> double, VectorUDAFVarLong(col 0:tinyint) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_samp, VectorUDAFVarLong(col 0:tinyint) -> struct<count:bigint,sum:double,variance:double> aggregation: var_pop, VectorUDAFVarLong(col 2:int) -> struct<count:bigint,sum:double,variance:double> aggregation: var_samp, VectorUDAFVarLong(col 2:int) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_pop
+                      aggregators: VectorUDAFSumDouble(col 13:double) -> double, VectorUDAFSumDouble(col 4:double) -> double, VectorUDAFCount(col 4:float) -> bigint, VectorUDAFMinDouble(col 5:double) -> double, VectorUDAFSumDouble(col 17:double) -> double, VectorUDAFSumDouble(col 14:double) -> double, VectorUDAFCount(col 0:tinyint) -> bigint, VectorUDAFSumDouble(col 19:double) -> double, VectorUDAFSumDouble(col 15:double) -> double, VectorUDAFCount(col 2:int) -> bigint
                       className: VectorGroupByOperator
                       groupByMode: HASH
                       keyExpressions: col 4:float, col 10:boolean, col 5:double, col 6:string, col 0:tinyint, col 2:int, col 8:timestamp
                       native: false
                       vectorProcessingMode: HASH
-                      projectedOutputColumnNums: [0, 1, 2, 3, 4, 5]
-                  keys: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)
+                      projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+                  keys: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
                   mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
@@ -118,7 +119,7 @@ STAGE PLANS:
                         nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: double), _col9 (type: struct<count:bigint,sum:double,variance:double>), _col10 (type: struct<count:bigint,sum:double,variance:double>), _col11 (type: struct<count:bigint,sum:double,variance:double>), _col12 (type: struct<count:bigint,sum:double,variance:double>)
+                    value expressions: _col7 (type: double), _col8 (type: double), _col9 (type: bigint), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: bigint), _col14 (type: double), _col15 (type: double), _col16 (type: bigint)
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -134,19 +135,19 @@ STAGE PLANS:
               includeColumns: [0, 1, 2, 4, 5, 6, 7, 8, 10]
               dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
               partitionColumnCount: 0
-              scratchColumnTypeNames: []
+              scratchColumnTypeNames: [double, double, double, double, double, double, double]
       Reduce Vectorization:
           enabled: false
           enableConditionsNotMet: hive.vectorized.execution.reduce.enabled IS false, hive.execution.engine mr IN [tez, spark] IS false
       Reduce Operator Tree:
         Group By Operator
-          aggregations: stddev_samp(VALUE._col0), min(VALUE._col1), stddev_samp(VALUE._col2), var_pop(VALUE._col3), var_samp(VALUE._col4), stddev_pop(VALUE._col5)
+          aggregations: sum(VALUE._col0), sum(VALUE._col1), count(VALUE._col2), min(VALUE._col3), sum(VALUE._col4), sum(VALUE._col5), count(VALUE._col6), sum(VALUE._col7), sum(VALUE._col8), count(VALUE._col9)
           keys: KEY._col0 (type: float), KEY._col1 (type: boolean), KEY._col2 (type: double), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int), KEY._col6 (type: timestamp)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
           Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - CAST( _col5 AS decimal(10,0))) (type: decimal(13,2)), _col8 (type: double), (_col2 * 79.553D) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), _col10 (type: double), (-23.0D % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - CAST( _col5 AS decimal(10,0)))) (type: decimal(13,2)), _col12 (type: double)
+            expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), power(((_col7 - ((_col8 * _col8) / _col9)) / CASE WHEN ((_col9 = 1L)) THEN (null) ELSE ((_col9 - 1)) END), 0.5) (type: double), (-26.28 - CAST( _col5 AS decimal(10,0))) (type: decimal(13,2)), _col10 (type: double), (_col2 * 79.553D) (type: double), (33.0 % _col0) (type: float), power(((_col11 - ((_col12 * _col12) / _col13)) / CASE WHEN ((_col13 = 1L)) THEN (null) ELSE ((_col13 - 1)) END), 0.5) (type: double), ((_col11 - ((_col12 * _col12) / _col13)) / _col13) (type: double), (-23.0D % _col2) (type: double), (- _col4) (type: tinyint), ((_col14 - ((_col15 * _col15) / _col16)) / CASE WHEN ((_col16 = 1L)) THEN (null) ELSE ((_col16 - 1)) END) (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - CAST( _col5 AS decimal(10,0)))) (type: decimal(13,2)), power(((_col14 - ((_col15 * _col15) / _col16)) / _col16), 0.5) (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
             Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/5cb8867b/ql/src/test/results/clientpositive/vectorization_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_16.q.out b/ql/src/test/results/clientpositive/vectorization_16.q.out
index 97c6ffb..571eae0 100644
--- a/ql/src/test/results/clientpositive/vectorization_16.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_16.q.out
@@ -64,38 +64,39 @@ STAGE PLANS:
               predicate: (((cdouble >= -1.389D) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean)
               Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
-                outputColumnNames: cdouble, cstring1, ctimestamp1
+                expressions: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp), (cdouble * cdouble) (type: double)
+                outputColumnNames: _col0, _col1, _col2, _col3
                 Select Vectorization:
                     className: VectorSelectOperator
                     native: true
-                    projectedOutputColumnNums: [5, 6, 8]
+                    projectedOutputColumnNums: [6, 5, 8, 13]
+                    selectExpressions: DoubleColMultiplyDoubleColumn(col 5:double, col 5:double) -> 13:double
                 Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: count(cdouble), stddev_samp(cdouble), min(cdouble)
+                  aggregations: count(_col1), sum(_col3), sum(_col1), min(_col1)
                   Group By Vectorization:
-                      aggregators: VectorUDAFCount(col 5:double) -> bigint, VectorUDAFVarDouble(col 5:double) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_samp, VectorUDAFMinDouble(col 5:double) -> double
+                      aggregators: VectorUDAFCount(col 5:double) -> bigint, VectorUDAFSumDouble(col 13:double) -> double, VectorUDAFSumDouble(col 5:double) -> double, VectorUDAFMinDouble(col 5:double) -> double
                       className: VectorGroupByOperator
                       groupByMode: HASH
-                      keyExpressions: col 5:double, col 6:string, col 8:timestamp
+                      keyExpressions: col 6:string, col 5:double, col 8:timestamp
                       native: false
                       vectorProcessingMode: HASH
-                      projectedOutputColumnNums: [0, 1, 2]
-                  keys: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
+                      projectedOutputColumnNums: [0, 1, 2, 3]
+                  keys: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
                   mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                   Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
+                    key expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
                     sort order: +++
-                    Map-reduce partition columns: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkOperator
                         native: false
                         nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                     Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col3 (type: bigint), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: double)
+                    value expressions: _col3 (type: bigint), _col4 (type: double), _col5 (type: double), _col6 (type: double)
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -111,20 +112,20 @@ STAGE PLANS:
               includeColumns: [5, 6, 7, 8]
               dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
               partitionColumnCount: 0
-              scratchColumnTypeNames: []
+              scratchColumnTypeNames: [double]
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
           enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Reduce Operator Tree:
         Group By Operator
-          aggregations: count(VALUE._col0), stddev_samp(VALUE._col1), min(VALUE._col2)
-          keys: KEY._col0 (type: double), KEY._col1 (type: string), KEY._col2 (type: timestamp)
+          aggregations: count(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2), min(VALUE._col3)
+          keys: KEY._col0 (type: string), KEY._col1 (type: double), KEY._col2 (type: timestamp)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
           Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639D) (type: double), (- (_col0 - 9763215.5639D)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639D / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double)
+            expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp), (_col1 - 9763215.5639D) (type: double), (- (_col1 - 9763215.5639D)) (type: double), _col3 (type: bigint), power(((_col4 - ((_col5 * _col5) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END), 0.5) (type: double), (- power(((_col4 - ((_col5 * _col5) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END), 0.5)) (type: double), (power(((_col4 - ((_col5 * _col5) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END), 0.5) * UDFToDouble(_col3)) (type: double), _col6 (type: double), (9763215.5639D / _col1) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), power(((_col4 - ((_col5 * _col5) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END), 0.5) (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
             Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/5cb8867b/ql/src/test/results/clientpositive/vectorization_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_2.q.out b/ql/src/test/results/clientpositive/vectorization_2.q.out
index 1df05af..e3d6ad0 100644
--- a/ql/src/test/results/clientpositive/vectorization_2.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_2.q.out
@@ -70,25 +70,26 @@ STAGE PLANS:
               predicate: (((cdouble < UDFToDouble(ctinyint)) and ((UDFToDouble(ctimestamp2) <> -10669.0D) or (cint < 359))) or ((ctimestamp1 < ctimestamp2) and (cstring2 like 'b%') and (cfloat <= -5638.15))) (type: boolean)
               Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: ctinyint (type: tinyint), csmallint (type: smallint), cbigint (type: bigint), cfloat (type: float), cdouble (type: double)
-                outputColumnNames: ctinyint, csmallint, cbigint, cfloat, cdouble
+                expressions: csmallint (type: smallint), cfloat (type: float), cbigint (type: bigint), ctinyint (type: tinyint), cdouble (type: double), UDFToDouble(cbigint) (type: double), (UDFToDouble(cbigint) * UDFToDouble(cbigint)) (type: double)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                 Select Vectorization:
                     className: VectorSelectOperator
                     native: true
-                    projectedOutputColumnNums: [0, 1, 3, 4, 5]
+                    projectedOutputColumnNums: [1, 4, 3, 0, 5, 13, 16]
+                    selectExpressions: CastLongToDouble(col 3:bigint) -> 13:double, DoubleColMultiplyDoubleColumn(col 14:double, col 15:double)(children: CastLongToDouble(col 3:bigint) -> 14:double, CastLongToDouble(col 3:bigint) -> 15:double) -> 16:double
                 Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: avg(csmallint), sum(cfloat), var_pop(cbigint), count(), min(ctinyint), avg(cdouble)
+                  aggregations: sum(_col0), count(_col0), sum(_col1), sum(_col6), sum(_col5), count(_col2), count(), min(_col3), sum(_col4), count(_col4)
                   Group By Vectorization:
-                      aggregators: VectorUDAFAvgLong(col 1:smallint) -> struct<count:bigint,sum:double,input:smallint>, VectorUDAFSumDouble(col 4:float) -> double, VectorUDAFVarLong(col 3:bigint) -> struct<count:bigint,sum:double,variance:double> aggregation: var_pop, VectorUDAFCountStar(*) -> bigint, VectorUDAFMinLong(col 0:tinyint) -> tinyint, VectorUDAFAvgDouble(col 5:double) -> struct<count:bigint,sum:double,input:double>
+                      aggregators: VectorUDAFSumLong(col 1:smallint) -> bigint, VectorUDAFCount(col 1:smallint) -> bigint, VectorUDAFSumDouble(col 4:float) -> double, VectorUDAFSumDouble(col 16:double) -> double, VectorUDAFSumDouble(col 13:double) -> double, VectorUDAFCount(col 3:bigint) -> bigint, VectorUDAFCountStar(*) -> bigint, VectorUDAFMinLong(col 0:tinyint) -> tinyint, VectorUDAFSumDouble(col 5:double) -> double, VectorUDAFCount(col 5:double) -> bigint
                       className: VectorGroupByOperator
                       groupByMode: HASH
                       native: false
                       vectorProcessingMode: HASH
-                      projectedOutputColumnNums: [0, 1, 2, 3, 4, 5]
+                      projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
                   mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                  Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: NONE
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
+                  Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     sort order: 
                     Reduce Sink Vectorization:
@@ -96,8 +97,8 @@ STAGE PLANS:
                         native: false
                         nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                    Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: struct<count:bigint,sum:double,input:smallint>), _col1 (type: double), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: bigint), _col4 (type: tinyint), _col5 (type: struct<count:bigint,sum:double,input:double>)
+                    Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: tinyint), _col8 (type: double), _col9 (type: bigint)
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -113,24 +114,24 @@ STAGE PLANS:
               includeColumns: [0, 1, 2, 3, 4, 5, 7, 8, 9]
               dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
               partitionColumnCount: 0
-              scratchColumnTypeNames: [double]
+              scratchColumnTypeNames: [double, double, double, double]
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
           enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Reduce Operator Tree:
         Group By Operator
-          aggregations: avg(VALUE._col0), sum(VALUE._col1), var_pop(VALUE._col2), count(VALUE._col3), min(VALUE._col4), avg(VALUE._col5)
+          aggregations: sum(VALUE._col0), count(VALUE._col1), sum(VALUE._col2), sum(VALUE._col3), sum(VALUE._col4), count(VALUE._col5), count(VALUE._col6), min(VALUE._col7), sum(VALUE._col8), count(VALUE._col9)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: NONE
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
+          Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: double), (_col0 % -563.0D) (type: double), (_col0 + 762.0D) (type: double), _col1 (type: double), _col2 (type: double), (- _col2) (type: double), (_col1 - _col0) (type: double), _col3 (type: bigint), (- (_col1 - _col0)) (type: double), (_col2 - 762.0D) (type: double), _col4 (type: tinyint), ((- _col2) + UDFToDouble(_col4)) (type: double), _col5 (type: double), (((- _col2) + UDFToDouble(_col4)) - _col1) (type: double)
+            expressions: (_col0 / _col1) (type: double), ((_col0 / _col1) % -563.0D) (type: double), ((_col0 / _col1) + 762.0D) (type: double), _col2 (type: double), ((_col3 - ((_col4 * _col4) / _col5)) / _col5) (type: double), (- ((_col3 - ((_col4 * _col4) / _col5)) / _col5)) (type: double), (_col2 - (_col0 / _col1)) (type: double), _col6 (type: bigint), (- (_col2 - (_col0 / _col1))) (type: double), (((_col3 - ((_col4 * _col4) / _col5)) / _col5) - 762.0D) (type: double), _col7 (type: tinyint), ((- ((_col3 - ((_col4 * _col4) / _col5)) / _col5)) + UDFToDouble(_col7)) (type: double), (_col8 / _col9) (type: double), (((- ((_col3 - ((_col4 * _col4) / _col5)) / _col5)) + UDFToDouble(_col7)) - _col2) (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
-            Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -190,4 +191,4 @@ WHERE  (((ctimestamp1 < ctimestamp2)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
--5646.467075892857	-16.467075892856883	-4884.467075892857	-2839.634998679161	1.49936299222378778E18	-1.49936299222378778E18	2806.832077213696	3584	-2806.832077213696	1.49936299222378701E18	-64	-1.49936299222378778E18	-5650.1297631138395	-1.49936299222378496E18
+-5646.467075892857	-16.467075892856883	-4884.467075892857	-2839.634998679161	1.49936299222378906E18	-1.49936299222378906E18	2806.832077213696	3584	-2806.832077213696	1.49936299222378829E18	-64	-1.49936299222378906E18	-5650.1297631138395	-1.49936299222378624E18
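
Note: the same reduction applies to avg() and var_pop() in this query: avg(csmallint) becomes sum(csmallint) / count(csmallint), and var_pop(cbigint) becomes (sum(cbigint * cbigint) - sum(cbigint) * sum(cbigint) / count(cbigint)) / count(cbigint). The golden result row above changes only in the low-order digits of the variance-derived values, presumably because the variance is now recomputed from sums of squares rather than by the previous streaming var_pop aggregator, which rounds slightly differently. A sketch of the equivalent computation (not the literal test query, which also carries a WHERE clause):

    SELECT sum(csmallint) / count(csmallint) AS avg_csmallint,
           (sum(cbigint * cbigint) - (sum(cbigint) * sum(cbigint)) / count(cbigint))
               / count(cbigint) AS var_pop_cbigint
    FROM alltypesorc;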

http://git-wip-us.apache.org/repos/asf/hive/blob/5cb8867b/ql/src/test/results/clientpositive/vectorization_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_3.q.out b/ql/src/test/results/clientpositive/vectorization_3.q.out
index 0cee254..bb6c014 100644
--- a/ql/src/test/results/clientpositive/vectorization_3.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_3.q.out
@@ -75,25 +75,26 @@ STAGE PLANS:
               predicate: (((UDFToDouble(cbigint) > cdouble) and (CAST( csmallint AS decimal(8,3)) >= 79.553) and (ctimestamp1 > ctimestamp2)) or ((UDFToFloat(cint) <= cfloat) and (CAST( cbigint AS decimal(22,3)) <> 79.553) and (UDFToDouble(ctimestamp2) = -29071.0D))) (type: boolean)
               Statistics: Num rows: 2503 Data size: 538153 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cfloat (type: float)
-                outputColumnNames: ctinyint, csmallint, cint, cfloat
+                expressions: csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cint (type: int), UDFToDouble(csmallint) (type: double), (UDFToDouble(csmallint) * UDFToDouble(csmallint)) (type: double), UDFToDouble(ctinyint) (type: double), (UDFToDouble(ctinyint) * UDFToDouble(ctinyint)) (type: double), UDFToDouble(cfloat) (type: double), (UDFToDouble(cfloat) * UDFToDouble(cfloat)) (type: double), UDFToDouble(cint) (type: double), (UDFToDouble(cint) * UDFToDouble(cint)) (type: double)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
                 Select Vectorization:
                     className: VectorSelectOperator
                     native: true
-                    projectedOutputColumnNums: [0, 1, 2, 4]
+                    projectedOutputColumnNums: [1, 0, 4, 2, 13, 18, 16, 20, 4, 17, 19, 23]
+                    selectExpressions: CastLongToDouble(col 1:smallint) -> 13:double, DoubleColMultiplyDoubleColumn(col 16:double, col 17:double)(children: CastLongToDouble(col 1:smallint) -> 16:double, CastLongToDouble(col 1:smallint) -> 17:double) -> 18:double, CastLongToDouble(col 0:tinyint) -> 16:double, DoubleColMultiplyDoubleColumn(col 17:double, col 19:double)(children: CastLongToDouble(col 0:tinyint) -> 17:double, CastLongToDouble(col 0:tinyint) -> 19:double) -> 20:double, DoubleColMultiplyDoubleColumn(col 4:double, col 4:double)(children: col 4:float, col 4:float) -> 17:double, CastLongToDouble(col 2:int) -> 19:double, DoubleColMultiplyDoubleColumn(col 21:double, col 22:double)(children: CastLongToDouble(col 2:int) -> 21:double, CastLongToDouble(col 2:int) -> 22:double) -> 23:double
                 Statistics: Num rows: 2503 Data size: 538153 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: stddev_samp(csmallint), stddev_pop(ctinyint), stddev_samp(cfloat), sum(cfloat), avg(cint), stddev_pop(cint)
+                  aggregations: sum(_col5), sum(_col4), count(_col0), sum(_col7), sum(_col6), count(_col1), sum(_col9), sum(_col8), count(_col2), sum(_col2), sum(_col3), count(_col3), sum(_col11), sum(_col10)
                   Group By Vectorization:
-                      aggregators: VectorUDAFVarLong(col 1:smallint) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_samp, VectorUDAFVarLong(col 0:tinyint) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_pop, VectorUDAFVarDouble(col 4:float) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_samp, VectorUDAFSumDouble(col 4:float) -> double, VectorUDAFAvgLong(col 2:int) -> struct<count:bigint,sum:double,input:int>, VectorUDAFVarLong(col 2:int) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_pop
+                      aggregators: VectorUDAFSumDouble(col 18:double) -> double, VectorUDAFSumDouble(col 13:double) -> double, VectorUDAFCount(col 1:smallint) -> bigint, VectorUDAFSumDouble(col 20:double) -> double, VectorUDAFSumDouble(col 16:double) -> double, VectorUDAFCount(col 0:tinyint) -> bigint, VectorUDAFSumDouble(col 17:double) -> double, VectorUDAFSumDouble(col 4:double) -> double, VectorUDAFCount(col 4:float) -> bigint, VectorUDAFSumDouble(col 4:float) -> double, VectorUDAFSumLong(col 2:int) -> bigint, VectorUDAFCount(col 2:int) -> bigint, VectorUDAFSumDouble(col 23:double) -> double, VectorUDAFSumDouble(col 19:double) -> double
                       className: VectorGroupByOperator
                       groupByMode: HASH
                       native: false
                       vectorProcessingMode: HASH
-                      projectedOutputColumnNums: [0, 1, 2, 3, 4, 5]
+                      projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
                   mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                  Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
+                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     sort order: 
                     Reduce Sink Vectorization:
@@ -101,8 +102,8 @@ STAGE PLANS:
                         native: false
                         nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                    Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: struct<count:bigint,sum:double,variance:double>), _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: double), _col4 (type: struct<count:bigint,sum:double,input:int>), _col5 (type: struct<count:bigint,sum:double,variance:double>)
+                    Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: double), _col1 (type: double), _col2 (type: bigint), _col3 (type: double), _col4 (type: double), _col5 (type: bigint), _col6 (type: double), _col7 (type: double), _col8 (type: bigint), _col9 (type: double), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: double), _col13 (type: double)
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -118,24 +119,24 @@ STAGE PLANS:
               includeColumns: [0, 1, 2, 3, 4, 5, 8, 9]
               dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
               partitionColumnCount: 0
-              scratchColumnTypeNames: [double, decimal(22,3), decimal(8,3)]
+              scratchColumnTypeNames: [double, decimal(22,3), decimal(8,3), double, double, double, double, double, double, double, double]
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
           enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Reduce Operator Tree:
         Group By Operator
-          aggregations: stddev_samp(VALUE._col0), stddev_pop(VALUE._col1), stddev_samp(VALUE._col2), sum(VALUE._col3), avg(VALUE._col4), stddev_pop(VALUE._col5)
+          aggregations: sum(VALUE._col0), sum(VALUE._col1), count(VALUE._col2), sum(VALUE._col3), sum(VALUE._col4), count(VALUE._col5), sum(VALUE._col6), sum(VALUE._col7), count(VALUE._col8), sum(VALUE._col9), sum(VALUE._col10), count(VALUE._col11), sum(VALUE._col12), sum(VALUE._col13)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
+          Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: double), (_col0 - 10.175D) (type: double), _col1 (type: double), (_col0 * (_col0 - 10.175D)) (type: double), (- _col1) (type: double), (_col0 % 79.553D) (type: double), (- (_col0 * (_col0 - 10.175D))) (type: double), _col2 (type: double), (- _col0) (type: double), _col3 (type: double), ((- (_col0 * (_col0 - 10.175D))) / (_col0 - 10.175D)) (type: double), (- (_col0 - 10.175D)) (type: double), _col4 (type: double), (-3728.0D - _col0) (type: double), _col5 (type: double), (_col4 / _col2) (type: double)
+            expressions: power(((_col0 - ((_col1 * _col1) / _col2)) / CASE WHEN ((_col2 = 1L)) THEN (null) ELSE ((_col2 - 1)) END), 0.5) (type: double), (power(((_col0 - ((_col1 * _col1) / _col2)) / CASE WHEN ((_col2 = 1L)) THEN (null) ELSE ((_col2 - 1)) END), 0.5) - 10.175D) (type: double), power(((_col3 - ((_col4 * _col4) / _col5)) / _col5), 0.5) (type: double), (power(((_col0 - ((_col1 * _col1) / _col2)) / CASE WHEN ((_col2 = 1L)) THEN (null) ELSE ((_col2 - 1)) END), 0.5) * (power(((_col0 - ((_col1 * _col1) / _col2)) / CASE WHEN ((_col2 = 1L)) THEN (null) ELSE ((_col2 - 1)) END), 0.5) - 10.175D)) (type: double), (- power(((_col3 - ((_col4 * _col4) / _col5)) / _col5), 0.5)) (type: double), (power(((_col0 - ((_col1 * _col1) / _col2)) / CASE WHEN ((_col2 = 1L)) THEN (null) ELSE ((_col2 - 1)) END), 0.5) % 79.553D) (type: double), (- (power(((_col0 - ((_col1 * _col1) / _col2)) / CASE WHEN ((_col2 = 1L)) THEN (null) ELSE ((_col2 - 1)) END), 0.5) * (power(((_col0 - ((_col1 * _col1) / _col2)) / CASE WHEN ((_col2 = 1L)) THEN (null) ELSE ((_col2 - 1)) END), 0.5) - 10.175D))) (type: double), power(((_col6 - ((_col7 * _col7) / _col8)) / CASE WHEN ((_col8 = 1L)) THEN (null) ELSE ((_col8 - 1)) END), 0.5) (type: double), (- power(((_col0 - ((_col1 * _col1) / _col2)) / CASE WHEN ((_col2 = 1L)) THEN (null) ELSE ((_col2 - 1)) END), 0.5)) (type: double), _col9 (type: double), ((- (power(((_col0 - ((_col1 * _col1) / _col2)) / CASE WHEN ((_col2 = 1L)) THEN (null) ELSE ((_col2 - 1)) END), 0.5) * (power(((_col0 - ((_col1 * _col1) / _col2)) / CASE WHEN ((_col2 = 1L)) THEN (null) ELSE ((_col2 - 1)) END), 0.5) - 10.175D))) / (power(((_col0 - ((_col1 * _col1) / _col2)) / CASE WHEN ((_col2 = 1L)) THEN (null) ELSE ((_col2 - 1)) END), 0.5) - 10.175D)) (type: double), (- (power(((_col0 - ((_col1 * _col1) / _col2)) / CASE WHEN ((_col2 = 1L)) THEN (null) ELSE ((_col2 - 1)) END), 0.5) - 10.175D)) (type: double), (_col10 / _col11) (type: double), (-3728.0D - power(((_col0 - ((_col1 * _col1) / _col2)) / CASE WHEN ((_col2 = 1L)) THEN (null) ELSE ((_col2 - 1)) END), 0.5)) (type: double), power(((_col12 - ((_col13 * _col13) / _col11)) / _col11), 0.5) (type: double), ((_col10 / _col11) / power(((_col6 - ((_col7 * _col7) / _col8)) / CASE WHEN ((_col8 = 1L)) THEN (null) ELSE ((_col8 - 1)) END), 0.5)) (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
-            Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 404 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -200,4 +201,4 @@ WHERE  (((cint <= cfloat)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-0.0	-10.175	34.287285216637066	-0.0	-34.287285216637066	0.0	0.0	34.34690095515641	-0.0	197.89499950408936	-0.0	10.175	NULL	-3728.0	NULL	NULL
+0.0	-10.175	34.287285216637066	-0.0	-34.287285216637066	0.0	0.0	34.3469009551564	-0.0	197.89499950408936	-0.0	10.175	NULL	-3728.0	NULL	NULL

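Note on vectorization_3.q.out: the same decomposition applies to the sample-variance family. stddev_samp is reconstructed from sum(x*x), sum(x) and count(x), with a CASE WHEN guard so that the count - 1 divisor yields NULL instead of dividing by zero when only one row is aggregated. A hedged sketch of the equivalent rewrite (illustrative only, not the literal q-file query):

  SELECT stddev_samp(csmallint) FROM alltypesorc;
  -- is planned roughly as
  SELECT power(
           (sum(cast(csmallint AS double) * cast(csmallint AS double))
             - (sum(cast(csmallint AS double)) * sum(cast(csmallint AS double))) / count(csmallint))
           / CASE WHEN count(csmallint) = 1 THEN NULL ELSE count(csmallint) - 1 END,
           0.5)
  FROM   alltypesorc;

The last-digit change in the expected output (34.34690095515641 vs 34.3469009551564) is consistent with the reconstructed expression accumulating floating-point rounding differently from the old streaming variance aggregator.
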
http://git-wip-us.apache.org/repos/asf/hive/blob/5cb8867b/ql/src/test/results/clientpositive/vectorization_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_4.q.out b/ql/src/test/results/clientpositive/vectorization_4.q.out
index 014750b..395431c 100644
--- a/ql/src/test/results/clientpositive/vectorization_4.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_4.q.out
@@ -70,17 +70,18 @@ STAGE PLANS:
               predicate: (((UDFToInteger(ctinyint) <= -89010) and (cdouble > 79.553D)) or ((cbigint <> -563L) and ((UDFToLong(ctinyint) <> cbigint) or (cdouble <= -3728.0D))) or (UDFToInteger(csmallint) >= cint)) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: ctinyint (type: tinyint), cint (type: int), cdouble (type: double)
-                outputColumnNames: ctinyint, cint, cdouble
+                expressions: cint (type: int), cdouble (type: double), ctinyint (type: tinyint), (cdouble * cdouble) (type: double)
+                outputColumnNames: _col0, _col1, _col2, _col3
                 Select Vectorization:
                     className: VectorSelectOperator
                     native: true
-                    projectedOutputColumnNums: [0, 2, 5]
+                    projectedOutputColumnNums: [2, 5, 0, 13]
+                    selectExpressions: DoubleColMultiplyDoubleColumn(col 5:double, col 5:double) -> 13:double
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: sum(cint), stddev_pop(cdouble), avg(cdouble), var_pop(cdouble), min(ctinyint)
+                  aggregations: sum(_col0), sum(_col3), sum(_col1), count(_col1), min(_col2)
                   Group By Vectorization:
-                      aggregators: VectorUDAFSumLong(col 2:int) -> bigint, VectorUDAFVarDouble(col 5:double) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_pop, VectorUDAFAvgDouble(col 5:double) -> struct<count:bigint,sum:double,input:double>, VectorUDAFVarDouble(col 5:double) -> struct<count:bigint,sum:double,variance:double> aggregation: var_pop, VectorUDAFMinLong(col 0:tinyint) -> tinyint
+                      aggregators: VectorUDAFSumLong(col 2:int) -> bigint, VectorUDAFSumDouble(col 13:double) -> double, VectorUDAFSumDouble(col 5:double) -> double, VectorUDAFCount(col 5:double) -> bigint, VectorUDAFMinLong(col 0:tinyint) -> tinyint
                       className: VectorGroupByOperator
                       groupByMode: HASH
                       native: false
@@ -88,7 +89,7 @@ STAGE PLANS:
                       projectedOutputColumnNums: [0, 1, 2, 3, 4]
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                  Statistics: Num rows: 1 Data size: 252 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     sort order: 
                     Reduce Sink Vectorization:
@@ -96,8 +97,8 @@ STAGE PLANS:
                         native: false
                         nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                    Statistics: Num rows: 1 Data size: 252 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: bigint), _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: tinyint)
+                    Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint), _col1 (type: double), _col2 (type: double), _col3 (type: bigint), _col4 (type: tinyint)
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -113,24 +114,24 @@ STAGE PLANS:
               includeColumns: [0, 1, 2, 3, 5]
               dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
               partitionColumnCount: 0
-              scratchColumnTypeNames: []
+              scratchColumnTypeNames: [double]
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
           enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Reduce Operator Tree:
         Group By Operator
-          aggregations: sum(VALUE._col0), stddev_pop(VALUE._col1), avg(VALUE._col2), var_pop(VALUE._col3), min(VALUE._col4)
+          aggregations: sum(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2), count(VALUE._col3), min(VALUE._col4)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 1 Data size: 252 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: bigint), (_col0 * -563L) (type: bigint), (-3728L + _col0) (type: bigint), _col1 (type: double), (- _col1) (type: double), _col2 (type: double), ((_col0 * -563L) % _col0) (type: bigint), (UDFToDouble(((_col0 * -563L) % _col0)) / _col2) (type: double), _col3 (type: double), (- (UDFToDouble(((_col0 * -563L) % _col0)) / _col2)) (type: double), ((-3728L + _col0) - (_col0 * -563L)) (type: bigint), _col4 (type: tinyint), _col4 (type: tinyint), (UDFToDouble(_col4) * (- (UDFToDouble(((_col0 * -563L) % _col0)) / _col2))) (type: double)
+            expressions: _col0 (type: bigint), (_col0 * -563L) (type: bigint), (-3728L + _col0) (type: bigint), power(((_col1 - ((_col2 * _col2) / _col3)) / _col3), 0.5) (type: double), (- power(((_col1 - ((_col2 * _col2) / _col3)) / _col3), 0.5)) (type: double), (_col2 / _col3) (type: double), ((_col0 * -563L) % _col0) (type: bigint), (UDFToDouble(((_col0 * -563L) % _col0)) / (_col2 / _col3)) (type: double), ((_col1 - ((_col2 * _col2) / _col3)) / _col3) (type: double), (- (UDFToDouble(((_col0 * -563L) % _col0)) / (_col2 / _col3))) (type: double), ((-3728L + _col0) - (_col0 * -563L)) (type: bigint), _col4 (type: tinyint), _col4 (type: tinyint), (UDFToDouble(_col4) * (- (UDFToDouble(((_col0 * -563L) % _col0)) / (_col2 / _col3)))) (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
-            Statistics: Num rows: 1 Data size: 252 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 252 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -190,4 +191,4 @@ WHERE  (((csmallint >= cint)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
--493101012745	277615870175435	-493101016473	136727.7868296355	-136727.7868296355	2298.5515807767374	0	0.0	1.8694487691330246E10	-0.0	-278108971191908	-64	-64	0.0
+-493101012745	277615870175435	-493101016473	136727.78682963562	-136727.78682963562	2298.5515807767374	0	0.0	1.8694487691330276E10	-0.0	-278108971191908	-64	-64	0.0

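Note on vectorization_4.q.out: here the rewrite covers var_pop, stddev_pop and avg over cdouble. The new reduce-side Select rebuilds them as ((_col1 - ((_col2 * _col2) / _col3)) / _col3), the same expression under power(..., 0.5), and (_col2 / _col3) respectively, from the primitive sums and count produced on the map side. A sketch of the equivalent query-level rewrite (illustrative only):

  SELECT var_pop(cdouble), stddev_pop(cdouble), avg(cdouble) FROM alltypesorc;
  -- is planned roughly as
  SELECT (sum(cdouble * cdouble) - (sum(cdouble) * sum(cdouble)) / count(cdouble)) / count(cdouble),
         power((sum(cdouble * cdouble) - (sum(cdouble) * sum(cdouble)) / count(cdouble)) / count(cdouble), 0.5),
         sum(cdouble) / count(cdouble)
  FROM   alltypesorc;
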
http://git-wip-us.apache.org/repos/asf/hive/blob/5cb8867b/ql/src/test/results/clientpositive/vectorization_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_9.q.out b/ql/src/test/results/clientpositive/vectorization_9.q.out
index 97c6ffb..571eae0 100644
--- a/ql/src/test/results/clientpositive/vectorization_9.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_9.q.out
@@ -64,38 +64,39 @@ STAGE PLANS:
               predicate: (((cdouble >= -1.389D) or (cstring1 < 'a')) and (cstring2 like '%b%')) (type: boolean)
               Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
-                outputColumnNames: cdouble, cstring1, ctimestamp1
+                expressions: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp), (cdouble * cdouble) (type: double)
+                outputColumnNames: _col0, _col1, _col2, _col3
                 Select Vectorization:
                     className: VectorSelectOperator
                     native: true
-                    projectedOutputColumnNums: [5, 6, 8]
+                    projectedOutputColumnNums: [6, 5, 8, 13]
+                    selectExpressions: DoubleColMultiplyDoubleColumn(col 5:double, col 5:double) -> 13:double
                 Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: count(cdouble), stddev_samp(cdouble), min(cdouble)
+                  aggregations: count(_col1), sum(_col3), sum(_col1), min(_col1)
                   Group By Vectorization:
-                      aggregators: VectorUDAFCount(col 5:double) -> bigint, VectorUDAFVarDouble(col 5:double) -> struct<count:bigint,sum:double,variance:double> aggregation: stddev_samp, VectorUDAFMinDouble(col 5:double) -> double
+                      aggregators: VectorUDAFCount(col 5:double) -> bigint, VectorUDAFSumDouble(col 13:double) -> double, VectorUDAFSumDouble(col 5:double) -> double, VectorUDAFMinDouble(col 5:double) -> double
                       className: VectorGroupByOperator
                       groupByMode: HASH
-                      keyExpressions: col 5:double, col 6:string, col 8:timestamp
+                      keyExpressions: col 6:string, col 5:double, col 8:timestamp
                       native: false
                       vectorProcessingMode: HASH
-                      projectedOutputColumnNums: [0, 1, 2]
-                  keys: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
+                      projectedOutputColumnNums: [0, 1, 2, 3]
+                  keys: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
                   mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                   Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
+                    key expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
                     sort order: +++
-                    Map-reduce partition columns: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkOperator
                         native: false
                         nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                         nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                     Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col3 (type: bigint), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: double)
+                    value expressions: _col3 (type: bigint), _col4 (type: double), _col5 (type: double), _col6 (type: double)
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -111,20 +112,20 @@ STAGE PLANS:
               includeColumns: [5, 6, 7, 8]
               dataColumns: ctinyint:tinyint, csmallint:smallint, cint:int, cbigint:bigint, cfloat:float, cdouble:double, cstring1:string, cstring2:string, ctimestamp1:timestamp, ctimestamp2:timestamp, cboolean1:boolean, cboolean2:boolean
               partitionColumnCount: 0
-              scratchColumnTypeNames: []
+              scratchColumnTypeNames: [double]
       Reduce Vectorization:
           enabled: false
           enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
           enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Reduce Operator Tree:
         Group By Operator
-          aggregations: count(VALUE._col0), stddev_samp(VALUE._col1), min(VALUE._col2)
-          keys: KEY._col0 (type: double), KEY._col1 (type: string), KEY._col2 (type: timestamp)
+          aggregations: count(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2), min(VALUE._col3)
+          keys: KEY._col0 (type: string), KEY._col1 (type: double), KEY._col2 (type: timestamp)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
           Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639D) (type: double), (- (_col0 - 9763215.5639D)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639D / _col0) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), _col4 (type: double)
+            expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp), (_col1 - 9763215.5639D) (type: double), (- (_col1 - 9763215.5639D)) (type: double), _col3 (type: bigint), power(((_col4 - ((_col5 * _col5) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END), 0.5) (type: double), (- power(((_col4 - ((_col5 * _col5) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END), 0.5)) (type: double), (power(((_col4 - ((_col5 * _col5) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END), 0.5) * UDFToDouble(_col3)) (type: double), _col6 (type: double), (9763215.5639D / _col1) (type: double), (CAST( _col3 AS decimal(19,0)) / -1.389) (type: decimal(28,6)), power(((_col4 - ((_col5 * _col5) / _col3)) / CASE WHEN ((_col3 = 1L)) THEN (null) ELSE ((_col3 - 1)) END), 0.5) (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
             Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/5cb8867b/ql/src/test/results/clientpositive/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_limit.q.out b/ql/src/test/results/clientpositive/vectorization_limit.q.out
index a4ff11d..8226fd4 100644
--- a/ql/src/test/results/clientpositive/vectorization_limit.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_limit.q.out
@@ -222,18 +222,18 @@ STAGE PLANS:
                   selectExpressions: DoubleColAddDoubleScalar(col 5:double, val 1.0) -> 13:double
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: avg(_col1)
+                aggregations: sum(_col1), count(_col1)
                 Group By Vectorization:
-                    aggregators: VectorUDAFAvgDouble(col 13:double) -> struct<count:bigint,sum:double,input:double>
+                    aggregators: VectorUDAFSumDouble(col 13:double) -> double, VectorUDAFCount(col 13:double) -> bigint
                     className: VectorGroupByOperator
                     groupByMode: HASH
                     keyExpressions: col 0:tinyint
                     native: false
                     vectorProcessingMode: HASH
-                    projectedOutputColumnNums: [0]
+                    projectedOutputColumnNums: [0, 1]
                 keys: _col0 (type: tinyint)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: tinyint)
@@ -246,7 +246,7 @@ STAGE PLANS:
                       nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.3
-                  value expressions: _col1 (type: struct<count:bigint,sum:double,input:double>)
+                  value expressions: _col1 (type: double), _col2 (type: bigint)
       Execution mode: vectorized
       Map Vectorization:
           enabled: true
@@ -269,21 +269,25 @@ STAGE PLANS:
           enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
       Reduce Operator Tree:
         Group By Operator
-          aggregations: avg(VALUE._col0)
+          aggregations: sum(VALUE._col0), count(VALUE._col1)
           keys: KEY._col0 (type: tinyint)
           mode: mergepartial
-          outputColumnNames: _col0, _col1
+          outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 20
-            Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
+          Select Operator
+            expressions: _col0 (type: tinyint), (_col1 / _col2) (type: double)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 20
               Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator